diff --git a/CAPI/cpp/.gitignore b/CAPI/cpp/.gitignore index 291cf41b..5542e213 100755 --- a/CAPI/cpp/.gitignore +++ b/CAPI/cpp/.gitignore @@ -23,6 +23,7 @@ mono_crash.* [Rr]eleases/ x64/ x86/ +cmake/ [Ww][Ii][Nn]32/ [Aa][Rr][Mm]/ [Aa][Rr][Mm]64/ diff --git a/CAPI/cpp/API/API.vcxproj b/CAPI/cpp/API/API.vcxproj index 6b9f8a97..fc7ac859 100755 --- a/CAPI/cpp/API/API.vcxproj +++ b/CAPI/cpp/API/API.vcxproj @@ -143,7 +143,7 @@ Console true ..\lib\debug;%(AdditionalLibraryDirectories) - absl_bad_any_cast_impl.lib;absl_bad_optional_access.lib;absl_bad_variant_access.lib;absl_base.lib;absl_city.lib;absl_civil_time.lib;absl_cord.lib;absl_cordz_functions.lib;absl_cordz_handle.lib;absl_cordz_info.lib;absl_cordz_sample_token.lib;absl_cord_internal.lib;absl_debugging_internal.lib;absl_demangle_internal.lib;absl_examine_stack.lib;absl_exponential_biased.lib;absl_failure_signal_handler.lib;absl_flags.lib;absl_flags_commandlineflag.lib;absl_flags_commandlineflag_internal.lib;absl_flags_config.lib;absl_flags_internal.lib;absl_flags_marshalling.lib;absl_flags_parse.lib;absl_flags_private_handle_accessor.lib;absl_flags_program_name.lib;absl_flags_reflection.lib;absl_flags_usage.lib;absl_flags_usage_internal.lib;absl_graphcycles_internal.lib;absl_hash.lib;absl_hashtablez_sampler.lib;absl_int128.lib;absl_leak_check.lib;absl_log_severity.lib;absl_low_level_hash.lib;absl_malloc_internal.lib;absl_periodic_sampler.lib;absl_random_distributions.lib;absl_random_internal_distribution_test_util.lib;absl_random_internal_platform.lib;absl_random_internal_pool_urbg.lib;absl_random_internal_randen.lib;absl_random_internal_randen_hwaes.lib;absl_random_internal_randen_hwaes_impl.lib;absl_random_internal_randen_slow.lib;absl_random_internal_seed_material.lib;absl_random_seed_gen_exception.lib;absl_random_seed_sequences.lib;absl_raw_hash_set.lib;absl_raw_logging_internal.lib;absl_scoped_set_env.lib;absl_spinlock_wait.lib;absl_stacktrace.lib;absl_status.lib;absl_statusor.lib;absl_strerror.lib;absl_strin
gs.lib;absl_strings_internal.lib;absl_str_format_internal.lib;absl_symbolize.lib;absl_synchronization.lib;absl_throw_delegate.lib;absl_time.lib;absl_time_zone.lib;address_sorting.lib;cares.lib;descriptor_upb_proto.lib;gpr.lib;grpc++.lib;grpc++_alts.lib;grpc++_error_details.lib;grpc++_unsecure.lib;grpc.lib;grpc_plugin_support.lib;grpc_unsecure.lib;libcrypto.lib;libprotobuf-lited.lib;libprotobufd.lib;libprotocd.lib;libssl.lib;re2.lib;upb.lib;upb_collections.lib;upb_extension_registry.lib;upb_fastdecode.lib;upb_json.lib;upb_mini_table.lib;upb_reflection.lib;upb_textformat.lib;upb_utf8_range.lib;zlibd.lib;Ws2_32.lib;Crypt32.lib;Iphlpapi.lib;%(AdditionalDependencies) + absl_bad_any_cast_impl.lib;absl_bad_optional_access.lib;absl_bad_variant_access.lib;absl_base.lib;absl_city.lib;absl_civil_time.lib;absl_cord.lib;absl_cordz_functions.lib;absl_cordz_handle.lib;absl_cordz_info.lib;absl_cordz_sample_token.lib;absl_cord_internal.lib;absl_crc32c.lib;absl_crc_cord_state.lib;absl_crc_cpu_detect.lib;absl_crc_internal.lib;absl_debugging_internal.lib;absl_demangle_internal.lib;absl_die_if_null.lib;absl_examine_stack.lib;absl_exponential_biased.lib;absl_failure_signal_handler.lib;absl_flags.lib;absl_flags_commandlineflag.lib;absl_flags_commandlineflag_internal.lib;absl_flags_config.lib;absl_flags_internal.lib;absl_flags_marshalling.lib;absl_flags_parse.lib;absl_flags_private_handle_accessor.lib;absl_flags_program_name.lib;absl_flags_reflection.lib;absl_flags_usage.lib;absl_flags_usage_internal.lib;absl_graphcycles_internal.lib;absl_hash.lib;absl_hashtablez_sampler.lib;absl_int128.lib;absl_kernel_timeout_internal.lib;absl_leak_check.lib;absl_log_entry.lib;absl_log_flags.lib;absl_log_globals.lib;absl_log_initialize.lib;absl_log_internal_check_op.lib;absl_log_internal_conditions.lib;absl_log_internal_format.lib;absl_log_internal_globals.lib;absl_log_internal_log_sink_set.lib;absl_log_internal_message.lib;absl_log_internal_nullguard.lib;absl_log_internal_proto.lib;absl_log_severity.lib;
absl_log_sink.lib;absl_low_level_hash.lib;absl_malloc_internal.lib;absl_periodic_sampler.lib;absl_random_distributions.lib;absl_random_internal_distribution_test_util.lib;absl_random_internal_platform.lib;absl_random_internal_pool_urbg.lib;absl_random_internal_randen.lib;absl_random_internal_randen_hwaes.lib;absl_random_internal_randen_hwaes_impl.lib;absl_random_internal_randen_slow.lib;absl_random_internal_seed_material.lib;absl_random_seed_gen_exception.lib;absl_random_seed_sequences.lib;absl_raw_hash_set.lib;absl_raw_logging_internal.lib;absl_scoped_set_env.lib;absl_spinlock_wait.lib;absl_stacktrace.lib;absl_status.lib;absl_statusor.lib;absl_strerror.lib;absl_strings.lib;absl_strings_internal.lib;absl_string_view.lib;absl_str_format_internal.lib;absl_symbolize.lib;absl_synchronization.lib;absl_throw_delegate.lib;absl_time.lib;absl_time_zone.lib;address_sorting.lib;cares.lib;descriptor_upb_proto.lib;gpr.lib;grpc++.lib;grpc++_alts.lib;grpc++_error_details.lib;grpc++_unsecure.lib;grpc.lib;grpc_plugin_support.lib;grpc_unsecure.lib;libcrypto.lib;libprotobuf-lited.lib;libprotobufd.lib;libprotocd.lib;libssl.lib;re2.lib;upb.lib;upb_collections.lib;upb_extension_registry.lib;upb_fastdecode.lib;upb_json.lib;upb_mini_table.lib;upb_reflection.lib;upb_textformat.lib;upb_utf8_range.lib;zlibd.lib;ws2_32.lib;Crypt32.lib;%(AdditionalDependencies) @@ -166,7 +166,7 @@ true true true - 
absl_bad_any_cast_impl.lib;absl_bad_optional_access.lib;absl_bad_variant_access.lib;absl_base.lib;absl_city.lib;absl_civil_time.lib;absl_cord.lib;absl_cordz_functions.lib;absl_cordz_handle.lib;absl_cordz_info.lib;absl_cordz_sample_token.lib;absl_cord_internal.lib;absl_debugging_internal.lib;absl_demangle_internal.lib;absl_examine_stack.lib;absl_exponential_biased.lib;absl_failure_signal_handler.lib;absl_flags.lib;absl_flags_commandlineflag.lib;absl_flags_commandlineflag_internal.lib;absl_flags_config.lib;absl_flags_internal.lib;absl_flags_marshalling.lib;absl_flags_parse.lib;absl_flags_private_handle_accessor.lib;absl_flags_program_name.lib;absl_flags_reflection.lib;absl_flags_usage.lib;absl_flags_usage_internal.lib;absl_graphcycles_internal.lib;absl_hash.lib;absl_hashtablez_sampler.lib;absl_int128.lib;absl_leak_check.lib;absl_log_severity.lib;absl_low_level_hash.lib;absl_malloc_internal.lib;absl_periodic_sampler.lib;absl_random_distributions.lib;absl_random_internal_distribution_test_util.lib;absl_random_internal_platform.lib;absl_random_internal_pool_urbg.lib;absl_random_internal_randen.lib;absl_random_internal_randen_hwaes.lib;absl_random_internal_randen_hwaes_impl.lib;absl_random_internal_randen_slow.lib;absl_random_internal_seed_material.lib;absl_random_seed_gen_exception.lib;absl_random_seed_sequences.lib;absl_raw_hash_set.lib;absl_raw_logging_internal.lib;absl_scoped_set_env.lib;absl_spinlock_wait.lib;absl_stacktrace.lib;absl_status.lib;absl_statusor.lib;absl_strerror.lib;absl_strings.lib;absl_strings_internal.lib;absl_str_format_internal.lib;absl_symbolize.lib;absl_synchronization.lib;absl_throw_delegate.lib;absl_time.lib;absl_time_zone.lib;address_sorting.lib;cares.lib;descriptor_upb_proto.lib;gpr.lib;grpc++.lib;grpc++_alts.lib;grpc++_error_details.lib;grpc++_unsecure.lib;grpc.lib;grpc_plugin_support.lib;grpc_unsecure.lib;libcrypto.lib;libprotobuf-lite.lib;libprotobuf.lib;libprotoc.lib;libssl.lib;re2.lib;upb.lib;upb_collections.lib;upb_extension_registry.li
b;upb_fastdecode.lib;upb_json.lib;upb_mini_table.lib;upb_reflection.lib;upb_textformat.lib;upb_utf8_range.lib;zlib.lib;Ws2_32.lib;Crypt32.lib;Iphlpapi.lib;%(AdditionalDependencies) + absl_bad_any_cast_impl.lib;absl_bad_optional_access.lib;absl_bad_variant_access.lib;absl_base.lib;absl_city.lib;absl_civil_time.lib;absl_cord.lib;absl_cordz_functions.lib;absl_cordz_handle.lib;absl_cordz_info.lib;absl_cordz_sample_token.lib;absl_cord_internal.lib;absl_crc32c.lib;absl_crc_cord_state.lib;absl_crc_cpu_detect.lib;absl_crc_internal.lib;absl_debugging_internal.lib;absl_demangle_internal.lib;absl_die_if_null.lib;absl_examine_stack.lib;absl_exponential_biased.lib;absl_failure_signal_handler.lib;absl_flags.lib;absl_flags_commandlineflag.lib;absl_flags_commandlineflag_internal.lib;absl_flags_config.lib;absl_flags_internal.lib;absl_flags_marshalling.lib;absl_flags_parse.lib;absl_flags_private_handle_accessor.lib;absl_flags_program_name.lib;absl_flags_reflection.lib;absl_flags_usage.lib;absl_flags_usage_internal.lib;absl_graphcycles_internal.lib;absl_hash.lib;absl_hashtablez_sampler.lib;absl_int128.lib;absl_kernel_timeout_internal.lib;absl_leak_check.lib;absl_log_entry.lib;absl_log_flags.lib;absl_log_globals.lib;absl_log_initialize.lib;absl_log_internal_check_op.lib;absl_log_internal_conditions.lib;absl_log_internal_format.lib;absl_log_internal_globals.lib;absl_log_internal_log_sink_set.lib;absl_log_internal_message.lib;absl_log_internal_nullguard.lib;absl_log_internal_proto.lib;absl_log_severity.lib;absl_log_sink.lib;absl_low_level_hash.lib;absl_malloc_internal.lib;absl_periodic_sampler.lib;absl_random_distributions.lib;absl_random_internal_distribution_test_util.lib;absl_random_internal_platform.lib;absl_random_internal_pool_urbg.lib;absl_random_internal_randen.lib;absl_random_internal_randen_hwaes.lib;absl_random_internal_randen_hwaes_impl.lib;absl_random_internal_randen_slow.lib;absl_random_internal_seed_material.lib;absl_random_seed_gen_exception.lib;absl_random_seed_sequences
.lib;absl_raw_hash_set.lib;absl_raw_logging_internal.lib;absl_scoped_set_env.lib;absl_spinlock_wait.lib;absl_stacktrace.lib;absl_status.lib;absl_statusor.lib;absl_strerror.lib;absl_strings.lib;absl_strings_internal.lib;absl_string_view.lib;absl_str_format_internal.lib;absl_symbolize.lib;absl_synchronization.lib;absl_throw_delegate.lib;absl_time.lib;absl_time_zone.lib;address_sorting.lib;cares.lib;descriptor_upb_proto.lib;gpr.lib;grpc++.lib;grpc++_alts.lib;grpc++_error_details.lib;grpc++_unsecure.lib;grpc.lib;grpc_plugin_support.lib;grpc_unsecure.lib;libcrypto.lib;libprotobuf-lite.lib;libprotobuf.lib;libprotoc.lib;libssl.lib;re2.lib;upb.lib;upb_collections.lib;upb_extension_registry.lib;upb_fastdecode.lib;upb_json.lib;upb_mini_table.lib;upb_reflection.lib;upb_textformat.lib;upb_utf8_range.lib;zlib.lib;ws2_32.lib;Crypt32.lib;%(AdditionalDependencies) ..\lib\release;%(AdditionalLibraryDirectories) diff --git a/CAPI/cpp/API/include/logic.h b/CAPI/cpp/API/include/logic.h index 689dbb33..70c73b81 100755 --- a/CAPI/cpp/API/include/logic.h +++ b/CAPI/cpp/API/include/logic.h @@ -51,7 +51,6 @@ class Logic : public ILogic int64_t teamID; THUAI7::PlayerTeam playerTeam; THUAI7::ShipType shipType; - std::unique_ptr timer; std::thread tAI; // 用于运行AI的线程 diff --git a/CAPI/cpp/API/include/state.h b/CAPI/cpp/API/include/state.h index a37e8bf9..e7badfe6 100755 --- a/CAPI/cpp/API/include/state.h +++ b/CAPI/cpp/API/include/state.h @@ -19,10 +19,10 @@ struct State // 自身信息,根据playerType的不同,可以调用的值也不同。 std::shared_ptr shipSelf; std::shared_ptr teamSelf; - std::vector> ships; - std::vector> enemyships; - std::vector> teams; + std::vector> enemyShips; + std::shared_ptr enemyTeam; + std::vector> bullets; std::vector> gameMap; diff --git a/CAPI/cpp/API/include/utils.hpp b/CAPI/cpp/API/include/utils.hpp index 1ef88575..8e058bc3 100755 --- a/CAPI/cpp/API/include/utils.hpp +++ b/CAPI/cpp/API/include/utils.hpp @@ -289,7 +289,11 @@ namespace Proto2THUAI7 auto gameInfo = std::make_shared(); 
gameInfo->gameTime = allMsg.game_time(); gameInfo->redScore = allMsg.red_team_score(); + gameInfo->redMoney = allMsg.red_team_score(); + gameInfo->redHomeHp = allMsg.red_home_hp(); gameInfo->blueScore = allMsg.blue_team_score(); + gameInfo->blueMoney = allMsg.blue_team_score(); + gameInfo->blueHomeHp = allMsg.blue_home_hp(); return gameInfo; } } // namespace Proto2THUAI7 diff --git a/CAPI/cpp/API/src/AI.cpp b/CAPI/cpp/API/src/AI.cpp index bb5b42e9..5cee0d82 100755 --- a/CAPI/cpp/API/src/AI.cpp +++ b/CAPI/cpp/API/src/AI.cpp @@ -4,12 +4,10 @@ #include "AI.h" #include "constants.h" // 注意不要使用conio.h,Windows.h等非标准库 - // 为假则play()期间确保游戏状态不更新,为真则只保证游戏状态在调用相关方法时不更新,大致一帧更新一次 extern const bool asynchronous = false; // 选手需要依次将player1到player4的船类型在这里定义 - extern const std::array shipTypeDict = { THUAI7::ShipType::CivilianShip, THUAI7::ShipType::MilitaryShip, @@ -21,10 +19,7 @@ extern const std::array shipTypeDict = { void AI::play(IShipAPI& api) { - if (this->playerID == 0) - { - } - else if (this->playerID == 1) + if (this->playerID == 1) { // 玩家1执行操作 } diff --git a/CAPI/cpp/API/src/logic.cpp b/CAPI/cpp/API/src/logic.cpp index 76fe381a..187de80a 100755 --- a/CAPI/cpp/API/src/logic.cpp +++ b/CAPI/cpp/API/src/logic.cpp @@ -19,7 +19,6 @@ Logic::Logic(int64_t pID, int64_t tID, THUAI7::PlayerType pType, THUAI7::ShipTyp teamID(tID), playerType(pType), shipType(sType) - { currentState = &state[0]; bufferState = &state[1]; @@ -27,9 +26,9 @@ Logic::Logic(int64_t pID, int64_t tID, THUAI7::PlayerType pType, THUAI7::ShipTyp currentState->mapInfo = std::make_shared(); bufferState->gameInfo = std::make_shared(); bufferState->mapInfo = std::make_shared(); - if (teamID == 1) + if (teamID == 0) playerTeam = THUAI7::PlayerTeam::Red; - if (teamID == 2) + if (teamID == 1) playerTeam = THUAI7::PlayerTeam::Blue; } @@ -44,7 +43,7 @@ std::vector> Logic::GetShips() const std::vector> Logic::GetEnemyShips() const { std::unique_lock lock(mtxState); - std::vector> temp(currentState->enemyships.begin(), 
currentState->enemyships.end()); + std::vector> temp(currentState->enemyShips.begin(), currentState->enemyShips.end()); logger->debug("Called GetEnemyShip"); return temp; } @@ -380,20 +379,20 @@ void Logic::LoadBufferSelf(const protobuf::MessageToClient& message) { for (const auto& item : message.obj_message()) { - if (Proto2THUAI7::messageOfObjDict[item.message_of_obj_case()] == THUAI7::MessageOfObj::ShipMessage) + if (Proto2THUAI7::messageOfObjDict[item.message_of_obj_case()] == THUAI7::MessageOfObj::ShipMessage && item.ship_message().team_id() == teamID) { if (item.ship_message().player_id() == playerID) { bufferState->shipSelf = Proto2THUAI7::Protobuf2THUAI7Ship(item.ship_message()); bufferState->ships.push_back(bufferState->shipSelf); + logger->debug("Add Self Ship!"); } else { std::shared_ptr ship = Proto2THUAI7::Protobuf2THUAI7Ship(item.ship_message()); - if (ship->teamID == teamID) - bufferState->ships.push_back(ship); + bufferState->ships.push_back(ship); + logger->debug("Add Ship!"); } - logger->debug("Add Ship!"); } } } @@ -403,12 +402,16 @@ void Logic::LoadBufferSelf(const protobuf::MessageToClient& message) { if (Proto2THUAI7::messageOfObjDict[item.message_of_obj_case()] == THUAI7::MessageOfObj::TeamMessage) { - if (item.team_message().player_id() == playerID) + if (item.team_message().team_id() == teamID) { bufferState->teamSelf = Proto2THUAI7::Protobuf2THUAI7Team(item.team_message()); - bufferState->teams.push_back(bufferState->teamSelf); + logger->debug("Add Self Team!"); + } + else + { + bufferState->enemyTeam = Proto2THUAI7::Protobuf2THUAI7Team(item.team_message()); + logger->debug("Add Enemy Team!"); } - logger->debug("Add Team!"); } } } @@ -427,7 +430,7 @@ void Logic::LoadBufferCase(const protobuf::MessageOfObj& item) { if (AssistFunction::HaveView(x, y, item.ship_message().x(), item.ship_message().y(), viewRange, bufferState->gameMap)) { - bufferState->enemyships.push_back(Proto2THUAI7::Protobuf2THUAI7Ship(item.ship_message())); + 
bufferState->enemyShips.push_back(Proto2THUAI7::Protobuf2THUAI7Ship(item.ship_message())); logger->debug("Add Enemyship!"); } } @@ -440,7 +443,21 @@ void Logic::LoadBufferCase(const protobuf::MessageOfObj& item) } break; case THUAI7::MessageOfObj::FactoryMessage: - if (AssistFunction::HaveView(x, y, item.factory_message().x(), item.factory_message().y(), viewRange, bufferState->gameMap)) + if (item.factory_message().team_id() == teamID) + { + auto pos = std::make_pair(AssistFunction::GridToCell(item.factory_message().x()), AssistFunction::GridToCell(item.factory_message().y())); + if (bufferState->mapInfo->factoryState.count(pos) == 0) + { + bufferState->mapInfo->factoryState.emplace(pos, std::make_pair(item.factory_message().team_id(), item.factory_message().hp())); + logger->debug("Add Factory!"); + } + else + { + bufferState->mapInfo->factoryState[pos].second = item.factory_message().hp(); + logger->debug("Update Factory!"); + } + } + else if (AssistFunction::HaveView(x, y, item.factory_message().x(), item.factory_message().y(), viewRange, bufferState->gameMap)) { auto pos = std::make_pair(AssistFunction::GridToCell(item.factory_message().x()), AssistFunction::GridToCell(item.factory_message().y())); if (bufferState->mapInfo->factoryState.count(pos) == 0) @@ -450,13 +467,27 @@ void Logic::LoadBufferCase(const protobuf::MessageOfObj& item) } else { - bufferState->mapInfo->factoryState[pos].first = item.factory_message().hp(); + bufferState->mapInfo->factoryState[pos].second = item.factory_message().hp(); logger->debug("Update Factory!"); } } break; case THUAI7::MessageOfObj::CommunityMessage: - if (AssistFunction::HaveView(x, y, item.community_message().x(), item.community_message().y(), viewRange, bufferState->gameMap)) + if (item.community_message().team_id() == teamID) + { + auto pos = std::make_pair(AssistFunction::GridToCell(item.community_message().x()), AssistFunction::GridToCell(item.community_message().y())); + if 
(bufferState->mapInfo->communityState.count(pos) == 0) + { + bufferState->mapInfo->communityState.emplace(pos, std::make_pair(item.community_message().team_id(), item.community_message().hp())); + logger->debug("Add Community!"); + } + else + { + bufferState->mapInfo->communityState[pos].second = item.community_message().hp(); + logger->debug("Update Community!"); + } + } + else if (AssistFunction::HaveView(x, y, item.community_message().x(), item.community_message().y(), viewRange, bufferState->gameMap)) { auto pos = std::make_pair(AssistFunction::GridToCell(item.community_message().x()), AssistFunction::GridToCell(item.community_message().y())); if (bufferState->mapInfo->communityState.count(pos) == 0) @@ -466,13 +497,27 @@ void Logic::LoadBufferCase(const protobuf::MessageOfObj& item) } else { - bufferState->mapInfo->communityState[pos].first = item.community_message().hp(); + bufferState->mapInfo->communityState[pos].second = item.community_message().hp(); logger->debug("Update Community!"); } } break; case THUAI7::MessageOfObj::FortMessage: - if (AssistFunction::HaveView(x, y, item.fort_message().x(), item.fort_message().y(), viewRange, bufferState->gameMap)) + if (item.fort_message().team_id() == teamID) + { + auto pos = std::make_pair(AssistFunction::GridToCell(item.fort_message().x()), AssistFunction::GridToCell(item.fort_message().y())); + if (bufferState->mapInfo->fortState.count(pos) == 0) + { + bufferState->mapInfo->fortState.emplace(pos, std::make_pair(item.fort_message().team_id(), item.fort_message().hp())); + logger->debug("Add Fort!"); + } + else + { + bufferState->mapInfo->fortState[pos].second = item.fort_message().hp(); + logger->debug("Update Fort!"); + } + } + else if (AssistFunction::HaveView(x, y, item.fort_message().x(), item.fort_message().y(), viewRange, bufferState->gameMap)) { auto pos = std::make_pair(AssistFunction::GridToCell(item.fort_message().x()), AssistFunction::GridToCell(item.fort_message().y())); if 
(bufferState->mapInfo->fortState.count(pos) == 0) @@ -482,7 +527,7 @@ void Logic::LoadBufferCase(const protobuf::MessageOfObj& item) } else { - bufferState->mapInfo->fortState[pos].first = item.fort_message().hp(); + bufferState->mapInfo->fortState[pos].second = item.fort_message().hp(); logger->debug("Update Fort!"); } } @@ -554,8 +599,7 @@ void Logic::LoadBuffer(const protobuf::MessageToClient& message) // 清空原有信息 bufferState->ships.clear(); - bufferState->enemyships.clear(); - bufferState->teams.clear(); + bufferState->enemyShips.clear(); bufferState->bullets.clear(); bufferState->guids.clear(); @@ -678,8 +722,6 @@ void Logic::Main(CreateAIFunc createAI, std::string IP, std::string port, bool f logger->info("server: {}:{}", IP, port); if (playerType == THUAI7::PlayerType::Ship) logger->info("ship ID: {}", playerID); - else - logger->info("home ID: {}", playerID); logger->info("player team: {}", THUAI7::playerTeamDict[playerTeam]); logger->info("****************************"); @@ -687,39 +729,19 @@ void Logic::Main(CreateAIFunc createAI, std::string IP, std::string port, bool f pComm = std::make_unique(IP, port); // 构造timer - if (playerTeam == THUAI7::PlayerTeam::Red) + if (playerType == THUAI7::PlayerType::Ship) { - if (playerType == THUAI7::PlayerType::Ship) - { - if (!file && !print) - timer = std::make_unique(*this); - else - timer = std::make_unique(*this, file, print, warnOnly, playerID); - } + if (!file && !print) + timer = std::make_unique(*this); else - { - if (!file && !print) - timer = std::make_unique(*this); - else - timer = std::make_unique(*this, file, print, warnOnly, playerID); - } + timer = std::make_unique(*this, file, print, warnOnly, playerID); } - else if (playerTeam == THUAI7::PlayerTeam::Blue) + else { - if (playerType == THUAI7::PlayerType::Ship) - { - if (!file && !print) - timer = std::make_unique(*this); - else - timer = std::make_unique(*this, file, print, warnOnly, playerID); - } + if (!file && !print) + timer = 
std::make_unique(*this); else - { - if (!file && !print) - timer = std::make_unique(*this); - else - timer = std::make_unique(*this, file, print, warnOnly, playerID); - } + timer = std::make_unique(*this, file, print, warnOnly, playerID); } // 构造AI线程 @@ -770,7 +792,6 @@ void Logic::Main(CreateAIFunc createAI, std::string IP, std::string port, bool f if (tAI.joinable()) { logger->info("Join the AI thread!"); - // 首先开启处理消息的线程 ProcessMessage(); tAI.join(); diff --git a/CAPI/cpp/API/src/main.cpp b/CAPI/cpp/API/src/main.cpp index 62175243..ff8d180a 100755 --- a/CAPI/cpp/API/src/main.cpp +++ b/CAPI/cpp/API/src/main.cpp @@ -65,7 +65,7 @@ int THUAI7Main(int argc, char** argv, CreateAIFunc AIBuilder) TCLAP::ValueArg teamID("t", "teamID", "Team ID 0,1 valid only", true, -1, &teamIdConstraint); cmd.add(teamID); - std::vector validPlayerIDs{0, 1, 2, 3, 4}; + std::vector validPlayerIDs{0, 1, 2, 3, 4}; // 0代表team TCLAP::ValuesConstraint playerIdConstraint(validPlayerIDs); TCLAP::ValueArg playerID("p", "playerID", "Player ID 0,1,2,3,4 valid only", true, -1, &playerIdConstraint); cmd.add(playerID); diff --git a/CAPI/cpp/README.md b/CAPI/cpp/README.md index 3f9593fe..3c262c96 100755 --- a/CAPI/cpp/README.md +++ b/CAPI/cpp/README.md @@ -73,7 +73,7 @@ C++ 通信组件与选手接口 - 设置生成方式为静态生成:在项目属性的“C/C++”的“代码生成”的“运行库”中,[Debug 下设置其为“多线程调试(`/MTd`)”](https://github.com/eesast/THUAI6/blob/c8e1fbe299c67a6e101fa02e85bcc971acd0f48b/CAPI/cpp/API/API.vcxproj#L137),[Release 下设置其为“多线程(`/MT`)”](https://github.com/eesast/THUAI6/blob/c8e1fbe299c67a6e101fa02e85bcc971acd0f48b/CAPI/cpp/API/API.vcxproj#L158) - 将之前提取的 Debug 和 Release 的 `.lib` 分别放在项目中的单独的文件夹里(THUAI6 使用的是 `CAPI\cpp\lib\debug` 和 `CAPI\cpp\lib\release`),并[使用 `.gitignore` 忽略掉](https://github.com/eesast/THUAI6/blob/c8e1fbe299c67a6e101fa02e85bcc971acd0f48b/CAPI/cpp/.gitignore#L502) - 在项目属性的“链接器”的首页的“附加库目录”中分别配置 Debug 和 Release 的 [`.lib` 文件的相应路径](https://github.com/eesast/THUAI6/blob/c8e1fbe299c67a6e101fa02e85bcc971acd0f48b/CAPI/cpp/API/API.vcxproj#L166) - 
- 在项目属性中的“链接器”的“输入”的“附加依赖库”中分别配置 Debug 和 Release [所需要链接的库的文件名](https://github.com/eesast/THUAI6/blob/c8e1fbe299c67a6e101fa02e85bcc971acd0f48b/CAPI/cpp/API/API.vcxproj#L165)。注意 Debug 和 Release 链接的库可能并不完全相同,建议在 cmd 中使用 `dir /b` 将其自动列举并复制。还需要注意需要手动指定链接一些 Windows 自带的 `lib`,例如 `Ws2_32.lib`、`Crypt32.lib`、`Iphlpapi.lib` 等。如果生成过程中不通过,表示找不到一些函数,则在 Google 中搜索该函数,如果发现是 Windows 系统的 API 函数则会搜到[微软官方文档](https://learn.microsoft.com) 的对应链接的页面,则在页面最下方会表明它所在的 `.lib`(例如 [`CreateProcessA` 的页面](https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessa#requirements)),加进去即可 + - 在项目属性中的“链接器”的“输入”的“附加依赖库”中分别配置 Debug 和 Release [所需要链接的库的文件名](https://github.com/eesast/THUAI6/blob/c8e1fbe299c67a6e101fa02e85bcc971acd0f48b/CAPI/cpp/API/API.vcxproj#L165)。注意 Debug 和 Release 链接的库可能并不完全相同,建议在 cmd 中使用 `dir /b` 将其自动列举并复制。还需要注意需要手动指定链接一些 Windows 自带的 `lib`,例如 `ws2_32.lib`、`Crypt32.lib`、`Iphlpapi.lib` 等。如果生成过程中不通过,表示找不到一些函数,则在 Google 中搜索该函数,如果发现是 Windows 系统的 API 函数则会搜到[微软官方文档](https://learn.microsoft.com) 的对应链接的页面,则在页面最下方会表明它所在的 `.lib`(例如 [`CreateProcessA` 的页面](https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessa#requirements)),加进去即可 - 然后进行生成解决方案。如果感觉编译的速度过慢,可以在项目属性的 `C/C++` 的“所有选项”中搜索多处理器编译,并[开启(`/MP`)](https://github.com/eesast/THUAI6/blob/ad4db599f97449786e6c910940bf4f69224d5408/CAPI/cpp/API/API.vcxproj#L162) - 然后开始运行。如果提示缺少一些 DLL,可以把之前保存的 `.dll` 文件(如果有的话)放在与 `.exe` 相同的目录下。该目录为**与 `.sln` 相同目录的**(不是与 `.vcxproj` 相同目录的)`x64\Debug` 和 `x64\Release` - 如果 x64 的 Debug 和 x64 的 Release 均生成成功,那么找一台没配过的电脑再试一次 diff --git a/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h b/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h new file mode 100644 index 00000000..865b6d17 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h @@ -0,0 +1,162 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: algorithm.h +// ----------------------------------------------------------------------------- +// +// This header file contains Google extensions to the standard C++ +// header. + +#ifndef ABSL_ALGORITHM_ALGORITHM_H_ +#define ABSL_ALGORITHM_ALGORITHM_H_ + +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace algorithm_internal + { + + // Performs comparisons with operator==, similar to C++14's `std::equal_to<>`. 
+ struct EqualTo + { + template + bool operator()(const T& a, const U& b) const + { + return a == b; + } + }; + + template + bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred pred, std::input_iterator_tag, std::input_iterator_tag) + { + while (true) + { + if (first1 == last1) + return first2 == last2; + if (first2 == last2) + return false; + if (!pred(*first1, *first2)) + return false; + ++first1; + ++first2; + } + } + + template + bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred, std::random_access_iterator_tag, std::random_access_iterator_tag) + { + return (last1 - first1 == last2 - first2) && + std::equal(first1, last1, first2, std::forward(pred)); + } + + // When we are using our own internal predicate that just applies operator==, we + // forward to the non-predicate form of std::equal. This enables an optimization + // in libstdc++ that can result in std::memcmp being used for integer types. 
+ template + bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, algorithm_internal::EqualTo /* unused */, std::random_access_iterator_tag, std::random_access_iterator_tag) + { + return (last1 - first1 == last2 - first2) && + std::equal(first1, last1, first2); + } + + template + It RotateImpl(It first, It middle, It last, std::true_type) + { + return std::rotate(first, middle, last); + } + + template + It RotateImpl(It first, It middle, It last, std::false_type) + { + std::rotate(first, middle, last); + return std::next(first, std::distance(middle, last)); + } + + } // namespace algorithm_internal + + // equal() + // + // Compares the equality of two ranges specified by pairs of iterators, using + // the given predicate, returning true iff for each corresponding iterator i1 + // and i2 in the first and second range respectively, pred(*i1, *i2) == true + // + // This comparison takes at most min(`last1` - `first1`, `last2` - `first2`) + // invocations of the predicate. Additionally, if InputIter1 and InputIter2 are + // both random-access iterators, and `last1` - `first1` != `last2` - `first2`, + // then the predicate is never invoked and the function returns false. + // + // This is a C++11-compatible implementation of C++14 `std::equal`. See + // https://en.cppreference.com/w/cpp/algorithm/equal for more information. + template + bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred) + { + return algorithm_internal::EqualImpl( + first1, last1, first2, last2, std::forward(pred), typename std::iterator_traits::iterator_category{}, typename std::iterator_traits::iterator_category{} + ); + } + + // Overload of equal() that performs comparison of two ranges specified by pairs + // of iterators using operator==. 
+ template + bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2) + { + return absl::equal(first1, last1, first2, last2, algorithm_internal::EqualTo{}); + } + + // linear_search() + // + // Performs a linear search for `value` using the iterator `first` up to + // but not including `last`, returning true if [`first`, `last`) contains an + // element equal to `value`. + // + // A linear search is of O(n) complexity which is guaranteed to make at most + // n = (`last` - `first`) comparisons. A linear search over short containers + // may be faster than a binary search, even when the container is sorted. + template + bool linear_search(InputIterator first, InputIterator last, const EqualityComparable& value) + { + return std::find(first, last, value) != last; + } + + // rotate() + // + // Performs a left rotation on a range of elements (`first`, `last`) such that + // `middle` is now the first element. `rotate()` returns an iterator pointing to + // the first element before rotation. This function is exactly the same as + // `std::rotate`, but fixes a bug in gcc + // <= 4.9 where `std::rotate` returns `void` instead of an iterator. + // + // The complexity of this algorithm is the same as that of `std::rotate`, but if + // `ForwardIterator` is not a random-access iterator, then `absl::rotate` + // performs an additional pass over the range to construct the return value. + template + ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { + return algorithm_internal::RotateImpl( + first, middle, last, std::is_same() + ); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_ALGORITHM_ALGORITHM_H_ diff --git a/CAPI/cpp/grpc/include/absl/algorithm/container.h b/CAPI/cpp/grpc/include/absl/algorithm/container.h new file mode 100644 index 00000000..8babfee2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/algorithm/container.h @@ -0,0 +1,1627 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: container.h +// ----------------------------------------------------------------------------- +// +// This header file provides Container-based versions of algorithmic functions +// within the C++ standard library. The following standard library sets of +// functions are covered within this file: +// +// * Algorithmic functions +// * Algorithmic functions +// * functions +// +// The standard library functions operate on iterator ranges; the functions +// within this API operate on containers, though many return iterator ranges. +// +// All functions within this API are named with a `c_` prefix. Calls such as +// `absl::c_xx(container, ...) are equivalent to std:: functions such as +// `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on +// iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`) +// have no equivalent here. +// +// For template parameter and variable naming, `C` indicates the container type +// to which the function is applied, `Pred` indicates the predicate object type +// to be used by the function and `T` indicates the applicable element type. 
+ +#ifndef ABSL_ALGORITHM_CONTAINER_H_ +#define ABSL_ALGORITHM_CONTAINER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_algorithm_internal + { + + // NOTE: it is important to defer to ADL lookup for building with C++ modules, + // especially for headers like which are not visible from this file + // but specialize std::begin and std::end. + using std::begin; + using std::end; + + // The type of the iterator given by begin(c) (possibly std::begin(c)). + // ContainerIter> gives vector::const_iterator, + // while ContainerIter> gives vector::iterator. + template + using ContainerIter = decltype(begin(std::declval())); + + // An MSVC bug involving template parameter substitution requires us to use + // decltype() here instead of just std::pair. + template + using ContainerIterPairType = + decltype(std::make_pair(ContainerIter(), ContainerIter())); + + template + using ContainerDifferenceType = decltype(std::distance( + std::declval>(), std::declval>() + )); + + template + using ContainerPointerType = + typename std::iterator_traits>::pointer; + + // container_algorithm_internal::c_begin and + // container_algorithm_internal::c_end are abbreviations for proper ADL + // lookup of std::begin and std::end, i.e. + // using std::begin; + // using std::end; + // std::foo(begin(c), end(c)); + // becomes + // std::foo(container_algorithm_internal::begin(c), + // container_algorithm_internal::end(c)); + // These are meant for internal use only. 
+ + template + ContainerIter c_begin(C& c) + { + return begin(c); + } + + template + ContainerIter c_end(C& c) + { + return end(c); + } + + template + struct IsUnorderedContainer : std::false_type + { + }; + + template + struct IsUnorderedContainer< + std::unordered_map> : std::true_type + { + }; + + template + struct IsUnorderedContainer> : std::true_type + { + }; + + // container_algorithm_internal::c_size. It is meant for internal use only. + + template + auto c_size(C& c) -> decltype(c.size()) + { + return c.size(); + } + + template + constexpr std::size_t c_size(T (&)[N]) + { + return N; + } + + } // namespace container_algorithm_internal + + // PUBLIC API + + //------------------------------------------------------------------------------ + // Abseil algorithm.h functions + //------------------------------------------------------------------------------ + + // c_linear_search() + // + // Container-based version of absl::linear_search() for performing a linear + // search within a container. + template + bool c_linear_search(const C& c, EqualityComparable&& value) + { + return linear_search(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); + } + + //------------------------------------------------------------------------------ + // algorithms + //------------------------------------------------------------------------------ + + // c_distance() + // + // Container-based version of the `std::distance()` function to + // return the number of elements within a container. 
+ template + container_algorithm_internal::ContainerDifferenceType c_distance( + const C& c + ) + { + return std::distance(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + //------------------------------------------------------------------------------ + // Non-modifying sequence operations + //------------------------------------------------------------------------------ + + // c_all_of() + // + // Container-based version of the `std::all_of()` function to + // test if all elements within a container satisfy a condition. + template + bool c_all_of(const C& c, Pred&& pred) + { + return std::all_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_any_of() + // + // Container-based version of the `std::any_of()` function to + // test if any element in a container fulfills a condition. + template + bool c_any_of(const C& c, Pred&& pred) + { + return std::any_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_none_of() + // + // Container-based version of the `std::none_of()` function to + // test if no elements in a container fulfill a condition. + template + bool c_none_of(const C& c, Pred&& pred) + { + return std::none_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_for_each() + // + // Container-based version of the `std::for_each()` function to + // apply a function to a container's elements. + template + decay_t c_for_each(C&& c, Function&& f) + { + return std::for_each(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(f)); + } + + // c_find() + // + // Container-based version of the `std::find()` function to find + // the first element containing the passed value within a container value. 
+ template + container_algorithm_internal::ContainerIter c_find(C& c, T&& value) + { + return std::find(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); + } + + // c_find_if() + // + // Container-based version of the `std::find_if()` function to find + // the first element in a container matching the given condition. + template + container_algorithm_internal::ContainerIter c_find_if(C& c, Pred&& pred) + { + return std::find_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_find_if_not() + // + // Container-based version of the `std::find_if_not()` function to + // find the first element in a container not matching the given condition. + template + container_algorithm_internal::ContainerIter c_find_if_not(C& c, Pred&& pred) + { + return std::find_if_not(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_find_end() + // + // Container-based version of the `std::find_end()` function to + // find the last subsequence within a container. + template + container_algorithm_internal::ContainerIter c_find_end( + Sequence1& sequence, Sequence2& subsequence + ) + { + return std::find_end(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence)); + } + + // Overload of c_find_end() for using a predicate evaluation other than `==` as + // the function's test condition. 
+ template + container_algorithm_internal::ContainerIter c_find_end( + Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred + ) + { + return std::find_end(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence), std::forward(pred)); + } + + // c_find_first_of() + // + // Container-based version of the `std::find_first_of()` function to + // find the first element within the container that is also within the options + // container. + template + container_algorithm_internal::ContainerIter c_find_first_of(C1& container, C2& options) + { + return std::find_first_of(container_algorithm_internal::c_begin(container), container_algorithm_internal::c_end(container), container_algorithm_internal::c_begin(options), container_algorithm_internal::c_end(options)); + } + + // Overload of c_find_first_of() for using a predicate evaluation other than + // `==` as the function's test condition. + template + container_algorithm_internal::ContainerIter c_find_first_of( + C1& container, C2& options, BinaryPredicate&& pred + ) + { + return std::find_first_of(container_algorithm_internal::c_begin(container), container_algorithm_internal::c_end(container), container_algorithm_internal::c_begin(options), container_algorithm_internal::c_end(options), std::forward(pred)); + } + + // c_adjacent_find() + // + // Container-based version of the `std::adjacent_find()` function to + // find equal adjacent elements within a container. + template + container_algorithm_internal::ContainerIter c_adjacent_find( + Sequence& sequence + ) + { + return std::adjacent_find(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_adjacent_find() for using a predicate evaluation other than + // `==` as the function's test condition. 
+ template + container_algorithm_internal::ContainerIter c_adjacent_find( + Sequence& sequence, BinaryPredicate&& pred + ) + { + return std::adjacent_find(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(pred)); + } + + // c_count() + // + // Container-based version of the `std::count()` function to count + // values that match within a container. + template + container_algorithm_internal::ContainerDifferenceType c_count( + const C& c, T&& value + ) + { + return std::count(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); + } + + // c_count_if() + // + // Container-based version of the `std::count_if()` function to + // count values matching a condition within a container. + template + container_algorithm_internal::ContainerDifferenceType c_count_if( + const C& c, Pred&& pred + ) + { + return std::count_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_mismatch() + // + // Container-based version of the `std::mismatch()` function to + // return the first element where two ordered containers differ. Applies `==` to + // the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). + template + container_algorithm_internal::ContainerIterPairType c_mismatch(C1& c1, C2& c2) + { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) + { + // Negates equality because Cpp17EqualityComparable doesn't require clients + // to overload both `operator==` and `operator!=`. 
+ if (!(*first1 == *first2)) + { + break; + } + } + + return std::make_pair(first1, first2); + } + + // Overload of c_mismatch() for using a predicate evaluation other than `==` as + // the function's test condition. Applies `pred`to the first N elements of `c1` + // and `c2`, where N = min(size(c1), size(c2)). + template + container_algorithm_internal::ContainerIterPairType c_mismatch( + C1& c1, C2& c2, BinaryPredicate pred + ) + { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) + { + if (!pred(*first1, *first2)) + { + break; + } + } + + return std::make_pair(first1, first2); + } + + // c_equal() + // + // Container-based version of the `std::equal()` function to + // test whether two containers are equal. + // + // NOTE: the semantics of c_equal() are slightly different than those of + // equal(): while the latter iterates over the second container only up to the + // size of the first container, c_equal() also checks whether the container + // sizes are equal. This better matches expectations about c_equal() based on + // its signature. + // + // Example: + // vector v1 = <1, 2, 3>; + // vector v2 = <1, 2, 3, 4>; + // equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true + // c_equal(v1, v2) returns false + + template + bool c_equal(const C1& c1, const C2& c2) + { + return ((container_algorithm_internal::c_size(c1) == container_algorithm_internal::c_size(c2)) && std::equal(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2))); + } + + // Overload of c_equal() for using a predicate evaluation other than `==` as + // the function's test condition. 
+ template + bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) + { + return ((container_algorithm_internal::c_size(c1) == container_algorithm_internal::c_size(c2)) && std::equal(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), std::forward(pred))); + } + + // c_is_permutation() + // + // Container-based version of the `std::is_permutation()` function + // to test whether a container is a permutation of another. + template + bool c_is_permutation(const C1& c1, const C2& c2) + { + using std::begin; + using std::end; + return c1.size() == c2.size() && + std::is_permutation(begin(c1), end(c1), begin(c2)); + } + + // Overload of c_is_permutation() for using a predicate evaluation other than + // `==` as the function's test condition. + template + bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) + { + using std::begin; + using std::end; + return c1.size() == c2.size() && + std::is_permutation(begin(c1), end(c1), begin(c2), std::forward(pred)); + } + + // c_search() + // + // Container-based version of the `std::search()` function to search + // a container for a subsequence. + template + container_algorithm_internal::ContainerIter c_search( + Sequence1& sequence, Sequence2& subsequence + ) + { + return std::search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence)); + } + + // Overload of c_search() for using a predicate evaluation other than + // `==` as the function's test condition. 
+ template + container_algorithm_internal::ContainerIter c_search( + Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred + ) + { + return std::search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence), std::forward(pred)); + } + + // c_search_n() + // + // Container-based version of the `std::search_n()` function to + // search a container for the first sequence of N elements. + template + container_algorithm_internal::ContainerIter c_search_n( + Sequence& sequence, Size count, T&& value + ) + { + return std::search_n(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), count, std::forward(value)); + } + + // Overload of c_search_n() for using a predicate evaluation other than + // `==` as the function's test condition. + template + container_algorithm_internal::ContainerIter c_search_n( + Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred + ) + { + return std::search_n(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), count, std::forward(value), std::forward(pred)); + } + + //------------------------------------------------------------------------------ + // Modifying sequence operations + //------------------------------------------------------------------------------ + + // c_copy() + // + // Container-based version of the `std::copy()` function to copy a + // container's elements into an iterator. + template + OutputIterator c_copy(const InputSequence& input, OutputIterator output) + { + return std::copy(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output); + } + + // c_copy_n() + // + // Container-based version of the `std::copy_n()` function to copy a + // container's first N elements into an iterator. 
+ template + OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) + { + return std::copy_n(container_algorithm_internal::c_begin(input), n, output); + } + + // c_copy_if() + // + // Container-based version of the `std::copy_if()` function to copy + // a container's elements satisfying some condition into an iterator. + template + OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, Pred&& pred) + { + return std::copy_if(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(pred)); + } + + // c_copy_backward() + // + // Container-based version of the `std::copy_backward()` function to + // copy a container's elements in reverse order into an iterator. + template + BidirectionalIterator c_copy_backward(const C& src, BidirectionalIterator dest) + { + return std::copy_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); + } + + // c_move() + // + // Container-based version of the `std::move()` function to move + // a container's elements into an iterator. + template + OutputIterator c_move(C&& src, OutputIterator dest) + { + return std::move(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); + } + + // c_move_backward() + // + // Container-based version of the `std::move_backward()` function to + // move a container's elements into an iterator in reverse order. + template + BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) + { + return std::move_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); + } + + // c_swap_ranges() + // + // Container-based version of the `std::swap_ranges()` function to + // swap a container's elements with another container's elements. Swaps the + // first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). 
+ template + container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) + { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + using std::swap; + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) + { + swap(*first1, *first2); + } + return first2; + } + + // c_transform() + // + // Container-based version of the `std::transform()` function to + // transform a container's elements using the unary operation, storing the + // result in an iterator pointing to the last transformed element in the output + // range. + template + OutputIterator c_transform(const InputSequence& input, OutputIterator output, UnaryOp&& unary_op) + { + return std::transform(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(unary_op)); + } + + // Overload of c_transform() for performing a transformation using a binary + // predicate. Applies `binary_op` to the first N elements of `c1` and `c2`, + // where N = min(size(c1), size(c2)). + template + OutputIterator c_transform(const InputSequence1& input1, const InputSequence2& input2, OutputIterator output, BinaryOp&& binary_op) + { + auto first1 = container_algorithm_internal::c_begin(input1); + auto last1 = container_algorithm_internal::c_end(input1); + auto first2 = container_algorithm_internal::c_begin(input2); + auto last2 = container_algorithm_internal::c_end(input2); + for (; first1 != last1 && first2 != last2; + ++first1, (void)++first2, ++output) + { + *output = binary_op(*first1, *first2); + } + + return output; + } + + // c_replace() + // + // Container-based version of the `std::replace()` function to + // replace a container's elements of some value with a new value. The container + // is modified in place. 
+ template + void c_replace(Sequence& sequence, const T& old_value, const T& new_value) + { + std::replace(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), old_value, new_value); + } + + // c_replace_if() + // + // Container-based version of the `std::replace_if()` function to + // replace a container's elements of some value with a new value based on some + // condition. The container is modified in place. + template + void c_replace_if(C& c, Pred&& pred, T&& new_value) + { + std::replace_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred), std::forward(new_value)); + } + + // c_replace_copy() + // + // Container-based version of the `std::replace_copy()` function to + // replace a container's elements of some value with a new value and return the + // results within an iterator. + template + OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, T&& new_value) + { + return std::replace_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(old_value), std::forward(new_value)); + } + + // c_replace_copy_if() + // + // Container-based version of the `std::replace_copy_if()` function + // to replace a container's elements of some value with a new value based on + // some condition, and return the results within an iterator. + template + OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, const T& new_value) + { + return std::replace_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred), new_value); + } + + // c_fill() + // + // Container-based version of the `std::fill()` function to fill a + // container with some value. 
+ template + void c_fill(C& c, const T& value) + { + std::fill(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), value); + } + + // c_fill_n() + // + // Container-based version of the `std::fill_n()` function to fill + // the first N elements in a container with some value. + template + void c_fill_n(C& c, Size n, const T& value) + { + std::fill_n(container_algorithm_internal::c_begin(c), n, value); + } + + // c_generate() + // + // Container-based version of the `std::generate()` function to + // assign a container's elements to the values provided by the given generator. + template + void c_generate(C& c, Generator&& gen) + { + std::generate(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(gen)); + } + + // c_generate_n() + // + // Container-based version of the `std::generate_n()` function to + // assign a container's first N elements to the values provided by the given + // generator. + template + container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, Generator&& gen) + { + return std::generate_n(container_algorithm_internal::c_begin(c), n, std::forward(gen)); + } + + // Note: `c_xx()` container versions for `remove()`, `remove_if()`, + // and `unique()` are omitted, because it's not clear whether or not such + // functions should call erase on their supplied sequences afterwards. Either + // behavior would be surprising for a different set of users. + + // c_remove_copy() + // + // Container-based version of the `std::remove_copy()` function to + // copy a container's elements while removing any elements matching the given + // `value`. 
+ template + OutputIterator c_remove_copy(const C& c, OutputIterator result, const T& value) + { + return std::remove_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, value); + } + + // c_remove_copy_if() + // + // Container-based version of the `std::remove_copy_if()` function + // to copy a container's elements while removing any elements matching the given + // condition. + template + OutputIterator c_remove_copy_if(const C& c, OutputIterator result, Pred&& pred) + { + return std::remove_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); + } + + // c_unique_copy() + // + // Container-based version of the `std::unique_copy()` function to + // copy a container's elements while removing any elements containing duplicate + // values. + template + OutputIterator c_unique_copy(const C& c, OutputIterator result) + { + return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result); + } + + // Overload of c_unique_copy() for using a predicate evaluation other than + // `==` for comparing uniqueness of the element values. + template + OutputIterator c_unique_copy(const C& c, OutputIterator result, BinaryPredicate&& pred) + { + return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); + } + + // c_reverse() + // + // Container-based version of the `std::reverse()` function to + // reverse a container's elements. + template + void c_reverse(Sequence& sequence) + { + std::reverse(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // c_reverse_copy() + // + // Container-based version of the `std::reverse()` function to + // reverse a container's elements and write them to an iterator range. 
+ template + OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) + { + return std::reverse_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), result); + } + + // c_rotate() + // + // Container-based version of the `std::rotate()` function to + // shift a container's elements leftward such that the `middle` element becomes + // the first element in the container. + template> + Iterator c_rotate(C& sequence, Iterator middle) + { + return absl::rotate(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence)); + } + + // c_rotate_copy() + // + // Container-based version of the `std::rotate_copy()` function to + // shift a container's elements leftward such that the `middle` element becomes + // the first element in a new iterator range. + template + OutputIterator c_rotate_copy( + const C& sequence, + container_algorithm_internal::ContainerIter middle, + OutputIterator result + ) + { + return std::rotate_copy(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), result); + } + + // c_shuffle() + // + // Container-based version of the `std::shuffle()` function to + // randomly shuffle elements within the container using a `gen()` uniform random + // number generator. + template + void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) + { + std::shuffle(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(gen)); + } + + //------------------------------------------------------------------------------ + // Partition functions + //------------------------------------------------------------------------------ + + // c_is_partitioned() + // + // Container-based version of the `std::is_partitioned()` function + // to test whether all elements in the container for which `pred` returns `true` + // precede those for which `pred` is `false`. 
+ template + bool c_is_partitioned(const C& c, Pred&& pred) + { + return std::is_partitioned(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_partition() + // + // Container-based version of the `std::partition()` function + // to rearrange all elements in a container in such a way that all elements for + // which `pred` returns `true` precede all those for which it returns `false`, + // returning an iterator to the first element of the second group. + template + container_algorithm_internal::ContainerIter c_partition(C& c, Pred&& pred) + { + return std::partition(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_stable_partition() + // + // Container-based version of the `std::stable_partition()` function + // to rearrange all elements in a container in such a way that all elements for + // which `pred` returns `true` precede all those for which it returns `false`, + // preserving the relative ordering between the two groups. The function returns + // an iterator to the first element of the second group. 
+ template + container_algorithm_internal::ContainerIter c_stable_partition(C& c, Pred&& pred) + { + return std::stable_partition(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_partition_copy() + // + // Container-based version of the `std::partition_copy()` function + // to partition a container's elements and return them into two iterators: one + // for which `pred` returns `true`, and one for which `pred` returns `false.` + + template + std::pair c_partition_copy( + const C& c, OutputIterator1 out_true, OutputIterator2 out_false, Pred&& pred + ) + { + return std::partition_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), out_true, out_false, std::forward(pred)); + } + + // c_partition_point() + // + // Container-based version of the `std::partition_point()` function + // to return the first element of an already partitioned container for which + // the given `pred` is not `true`. + template + container_algorithm_internal::ContainerIter c_partition_point(C& c, Pred&& pred) + { + return std::partition_point(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + //------------------------------------------------------------------------------ + // Sorting functions + //------------------------------------------------------------------------------ + + // c_sort() + // + // Container-based version of the `std::sort()` function + // to sort elements in ascending order of their values. + template + void c_sort(C& c) + { + std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_sort() for performing a `comp` comparison other than the + // default `operator<`. 
+ template + void c_sort(C& c, LessThan&& comp) + { + std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_stable_sort() + // + // Container-based version of the `std::stable_sort()` function + // to sort elements in ascending order of their values, preserving the order + // of equivalents. + template + void c_stable_sort(C& c) + { + std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_stable_sort() for performing a `comp` comparison other than the + // default `operator<`. + template + void c_stable_sort(C& c, LessThan&& comp) + { + std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_is_sorted() + // + // Container-based version of the `std::is_sorted()` function + // to evaluate whether the given container is sorted in ascending order. + template + bool c_is_sorted(const C& c) + { + return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // c_is_sorted() overload for performing a `comp` comparison other than the + // default `operator<`. + template + bool c_is_sorted(const C& c, LessThan&& comp) + { + return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_partial_sort() + // + // Container-based version of the `std::partial_sort()` function + // to rearrange elements within a container such that elements before `middle` + // are sorted in ascending order. 
+ template + void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle + ) + { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_partial_sort() for performing a `comp` comparison other than + // the default `operator<`. + template + void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle, + LessThan&& comp + ) + { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_partial_sort_copy() + // + // Container-based version of the `std::partial_sort_copy()` + // function to sort the elements in the given range `result` within the larger + // `sequence` in ascending order (and using `result` as the output parameter). + // At most min(result.last - result.first, sequence.last - sequence.first) + // elements from the sequence will be stored in the result. + template + container_algorithm_internal::ContainerIter + c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) + { + return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result)); + } + + // Overload of c_partial_sort_copy() for performing a `comp` comparison other + // than the default `operator<`. 
+    template <typename C, typename RandomAccessContainer, typename LessThan>
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+    c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, LessThan&& comp)
+    {
+        return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result), std::forward<LessThan>(comp));
+    }
+
+    // c_is_sorted_until()
+    //
+    // Container-based version of the <algorithm> `std::is_sorted_until()` function
+    // to return the first element within a container that is not sorted in
+    // ascending order as an iterator.
+    template <typename C>
+    container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c)
+    {
+        return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c));
+    }
+
+    // Overload of c_is_sorted_until() for performing a `comp` comparison other than
+    // the default `operator<`.
+    template <typename C, typename LessThan>
+    container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
+        C& c, LessThan&& comp
+    )
+    {
+        return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward<LessThan>(comp));
+    }
+
+    // c_nth_element()
+    //
+    // Container-based version of the <algorithm> `std::nth_element()` function
+    // to rearrange the elements within a container such that the `nth` element
+    // would be in that position in an ordered sequence; other elements may be in
+    // any order, except that all preceding `nth` will be less than that element,
+    // and all following `nth` will be greater than that element.
+    template <typename RandomAccessContainer>
+    void c_nth_element(
+        RandomAccessContainer& sequence,
+        container_algorithm_internal::ContainerIter<RandomAccessContainer> nth
+    )
+    {
+        std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_nth_element() for performing a `comp` comparison other than
+    // the default `operator<`.
+    template <typename RandomAccessContainer, typename LessThan>
+    void c_nth_element(
+        RandomAccessContainer& sequence,
+        container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
+        LessThan&& comp
+    )
+    {
+        std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    //------------------------------------------------------------------------------
+    // <algorithm> Binary Search
+    //------------------------------------------------------------------------------
+
+    // c_lower_bound()
+    //
+    // Container-based version of the <algorithm> `std::lower_bound()` function
+    // to return an iterator pointing to the first element in a sorted container
+    // which does not compare less than `value`.
+    template <typename Sequence, typename T>
+    container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+        Sequence& sequence, const T& value
+    )
+    {
+        return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value);
+    }
+
+    // Overload of c_lower_bound() for performing a `comp` comparison other than
+    // the default `operator<`.
+    template <typename Sequence, typename T, typename LessThan>
+    container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+        Sequence& sequence, const T& value, LessThan&& comp
+    )
+    {
+        return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward<LessThan>(comp));
+    }
+
+    // c_upper_bound()
+    //
+    // Container-based version of the <algorithm> `std::upper_bound()` function
+    // to return an iterator pointing to the first element in a sorted container
+    // which is greater than `value`.
+    template <typename Sequence, typename T>
+    container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+        Sequence& sequence, const T& value
+    )
+    {
+        return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value);
+    }
+
+    // Overload of c_upper_bound() for performing a `comp` comparison other than
+    // the default `operator<`.
+    template <typename Sequence, typename T, typename LessThan>
+    container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+        Sequence& sequence, const T& value, LessThan&& comp
+    )
+    {
+        return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward<LessThan>(comp));
+    }
+
+    // c_equal_range()
+    //
+    // Container-based version of the <algorithm> `std::equal_range()` function
+    // to return an iterator pair pointing to the first and last elements in a
+    // sorted container which compare equal to `value`.
+    template <typename Sequence, typename T>
+    container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+    c_equal_range(Sequence& sequence, const T& value)
+    {
+        return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value);
+    }
+
+    // Overload of c_equal_range() for performing a `comp` comparison other than
+    // the default `operator<`.
+    template <typename Sequence, typename T, typename LessThan>
+    container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+    c_equal_range(Sequence& sequence, const T& value, LessThan&& comp)
+    {
+        return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward<LessThan>(comp));
+    }
+
+    // c_binary_search()
+    //
+    // Container-based version of the <algorithm> `std::binary_search()` function
+    // to test if any element in the sorted container contains a value equivalent to
+    // 'value'.
+    template <typename Sequence, typename T>
+    bool c_binary_search(const Sequence& sequence, const T& value)
+    {
+        return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value);
+    }
+
+    // Overload of c_binary_search() for performing a `comp` comparison other than
+    // the default `operator<`.
+    template <typename Sequence, typename T, typename LessThan>
+    bool c_binary_search(const Sequence& sequence, const T& value, LessThan&& comp)
+    {
+        return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward<LessThan>(comp));
+    }
+
+    //------------------------------------------------------------------------------
+    // <algorithm> Merge functions
+    //------------------------------------------------------------------------------
+
+    // c_merge()
+    //
+    // Container-based version of the <algorithm> `std::merge()` function
+    // to merge two sorted containers into a single sorted iterator.
+    template <typename C1, typename C2, typename OutputIterator>
+    OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result)
+    {
+        return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result);
+    }
+
+    // Overload of c_merge() for performing a `comp` comparison other than
+    // the default `operator<`.
+    template <typename C1, typename C2, typename OutputIterator, typename LessThan>
+    OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, LessThan&& comp)
+    {
+        return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result, std::forward<LessThan>(comp));
+    }
+
+    // c_inplace_merge()
+    //
+    // Container-based version of the <algorithm> `std::inplace_merge()` function
+    // to merge a supplied iterator `middle` into a container.
+    template <typename C>
+    void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter<C> middle)
+    {
+        std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c));
+    }
+
+    // Overload of c_inplace_merge() for performing a merge using a `comp` other
+    // than `operator<`.
+    template <typename C, typename LessThan>
+    void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter<C> middle, LessThan&& comp)
+    {
+        std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c), std::forward<LessThan>(comp));
+    }
+
+    // c_includes()
+    //
+    // Container-based version of the <algorithm> `std::includes()` function
+    // to test whether a sorted container `c1` entirely contains another sorted
+    // container `c2`.
+    template <typename C1, typename C2>
+    bool c_includes(const C1& c1, const C2& c2)
+    {
+        return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2));
+    }
+
+    // Overload of c_includes() for performing a merge using a `comp` other than
+    // `operator<`.
+    template <typename C1, typename C2, typename LessThan>
+    bool c_includes(const C1& c1, const C2& c2, LessThan&& comp)
+    {
+        return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), std::forward<LessThan>(comp));
+    }
+
+    // c_set_union()
+    //
+    // Container-based version of the <algorithm> `std::set_union()` function
+    // to return an iterator containing the union of two containers; duplicate
+    // values are not copied into the output.
+    template <typename C1, typename C2, typename OutputIterator, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output)
+    {
+        return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output);
+    }
+
+    // Overload of c_set_union() for performing a merge using a `comp` other than
+    // `operator<`.
+    template <typename C1, typename C2, typename OutputIterator, typename LessThan, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp)
+    {
+        return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward<LessThan>(comp));
+    }
+
+    // c_set_intersection()
+    //
+    // Container-based version of the <algorithm> `std::set_intersection()` function
+    // to return an iterator containing the intersection of two sorted containers.
+    template <typename C1, typename C2, typename OutputIterator, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output)
+    {
+        // In debug builds, ensure that both containers are sorted with respect to the
+        // default comparator. std::set_intersection requires the containers be sorted
+        // using operator<.
+        assert(absl::c_is_sorted(c1));
+        assert(absl::c_is_sorted(c2));
+        return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output);
+    }
+
+    // Overload of c_set_intersection() for performing a merge using a `comp` other
+    // than `operator<`.
+    template <typename C1, typename C2, typename OutputIterator, typename LessThan, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp)
+    {
+        // In debug builds, ensure that both containers are sorted with respect to the
+        // default comparator. std::set_intersection requires the containers be sorted
+        // using the same comparator.
+        assert(absl::c_is_sorted(c1, comp));
+        assert(absl::c_is_sorted(c2, comp));
+        return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward<LessThan>(comp));
+    }
+
+    // c_set_difference()
+    //
+    // Container-based version of the <algorithm> `std::set_difference()` function
+    // to return an iterator containing elements present in the first container but
+    // not in the second.
+    template <typename C1, typename C2, typename OutputIterator, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output)
+    {
+        return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output);
+    }
+
+    // Overload of c_set_difference() for performing a merge using a `comp` other than
+    // `operator<`.
+    template <typename C1, typename C2, typename OutputIterator, typename LessThan, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp)
+    {
+        return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward<LessThan>(comp));
+    }
+
+    // c_set_symmetric_difference()
+    //
+    // Container-based version of the <algorithm> `std::set_symmetric_difference()`
+    // function to return an iterator containing elements present in either one
+    // container or the other, but not both.
+    template <typename C1, typename C2, typename OutputIterator, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output)
+    {
+        return std::set_symmetric_difference(
+            container_algorithm_internal::c_begin(c1),
+            container_algorithm_internal::c_end(c1),
+            container_algorithm_internal::c_begin(c2),
+            container_algorithm_internal::c_end(c2),
+            output
+        );
+    }
+
+    // Overload of c_set_symmetric_difference() for performing a merge using a
+    // `comp` other than `operator<`.
+    template <typename C1, typename C2, typename OutputIterator, typename LessThan, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C1>::value, void>::type, typename = typename std::enable_if<!container_algorithm_internal::IsUnorderedContainer<C2>::value, void>::type>
+    OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp)
+    {
+        return std::set_symmetric_difference(
+            container_algorithm_internal::c_begin(c1),
+            container_algorithm_internal::c_end(c1),
+            container_algorithm_internal::c_begin(c2),
+            container_algorithm_internal::c_end(c2),
+            output,
+            std::forward<LessThan>(comp)
+        );
+    }
+
+    //------------------------------------------------------------------------------
+    // <algorithm> Heap functions
+    //------------------------------------------------------------------------------
+
+    // c_push_heap()
+    //
+    // Container-based version of the <algorithm> `std::push_heap()` function
+    // to push a value onto a container heap.
+    template <typename RandomAccessContainer>
+    void c_push_heap(RandomAccessContainer& sequence)
+    {
+        std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_push_heap() for performing a push operation on a heap using a
+    // `comp` other than `operator<`.
+    template <typename RandomAccessContainer, typename LessThan>
+    void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp)
+    {
+        std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    // c_pop_heap()
+    //
+    // Container-based version of the <algorithm> `std::pop_heap()` function
+    // to pop a value from a heap container.
+    template <typename RandomAccessContainer>
+    void c_pop_heap(RandomAccessContainer& sequence)
+    {
+        std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_pop_heap() for performing a pop operation on a heap using a
+    // `comp` other than `operator<`.
+    template <typename RandomAccessContainer, typename LessThan>
+    void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp)
+    {
+        std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    // c_make_heap()
+    //
+    // Container-based version of the <algorithm> `std::make_heap()` function
+    // to make a container a heap.
+    template <typename RandomAccessContainer>
+    void c_make_heap(RandomAccessContainer& sequence)
+    {
+        std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_make_heap() for performing heap comparisons using a
+    // `comp` other than `operator<`
+    template <typename RandomAccessContainer, typename LessThan>
+    void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp)
+    {
+        std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    // c_sort_heap()
+    //
+    // Container-based version of the <algorithm> `std::sort_heap()` function
+    // to sort a heap into ascending order (after which it is no longer a heap).
+    template <typename RandomAccessContainer>
+    void c_sort_heap(RandomAccessContainer& sequence)
+    {
+        std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_sort_heap() for performing heap comparisons using a
+    // `comp` other than `operator<`
+    template <typename RandomAccessContainer, typename LessThan>
+    void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp)
+    {
+        std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    // c_is_heap()
+    //
+    // Container-based version of the <algorithm> `std::is_heap()` function
+    // to check whether the given container is a heap.
+    template <typename RandomAccessContainer>
+    bool c_is_heap(const RandomAccessContainer& sequence)
+    {
+        return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_is_heap() for performing heap comparisons using a
+    // `comp` other than `operator<`
+    template <typename RandomAccessContainer, typename LessThan>
+    bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp)
+    {
+        return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    // c_is_heap_until()
+    //
+    // Container-based version of the <algorithm> `std::is_heap_until()` function
+    // to find the first element in a given container which is not in heap order.
+    template <typename RandomAccessContainer>
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+    c_is_heap_until(RandomAccessContainer& sequence)
+    {
+        return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_is_heap_until() for performing heap comparisons using a
+    // `comp` other than `operator<`
+    template <typename RandomAccessContainer, typename LessThan>
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+    c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp)
+    {
+        return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    //------------------------------------------------------------------------------
+    // <algorithm> Min/max
+    //------------------------------------------------------------------------------
+
+    // c_min_element()
+    //
+    // Container-based version of the <algorithm> `std::min_element()` function
+    // to return an iterator pointing to the element with the smallest value, using
+    // `operator<` to make the comparisons.
+    template <typename Sequence>
+    container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+        Sequence& sequence
+    )
+    {
+        return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_min_element() for performing a `comp` comparison other than
+    // `operator<`.
+    template <typename Sequence, typename LessThan>
+    container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+        Sequence& sequence, LessThan&& comp
+    )
+    {
+        return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    // c_max_element()
+    //
+    // Container-based version of the <algorithm> `std::max_element()` function
+    // to return an iterator pointing to the element with the largest value, using
+    // `operator<` to make the comparisons.
+    template <typename Sequence>
+    container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+        Sequence& sequence
+    )
+    {
+        return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence));
+    }
+
+    // Overload of c_max_element() for performing a `comp` comparison other than
+    // `operator<`.
+    template <typename Sequence, typename LessThan>
+    container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+        Sequence& sequence, LessThan&& comp
+    )
+    {
+        return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<LessThan>(comp));
+    }
+
+    // c_minmax_element()
+    //
+    // Container-based version of the <algorithm> `std::minmax_element()` function
+    // to return a pair of iterators pointing to the elements containing the
+    // smallest and largest values, respectively, using `operator<` to make the
+    // comparisons.
+    template <typename C>
+    container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
+        C& c
+    )
+    {
+        return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c));
+    }
+
+    // Overload of c_minmax_element() for performing `comp` comparisons other than
+    // `operator<`.
+    template <typename C, typename LessThan>
+    container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
+        C& c, LessThan&& comp
+    )
+    {
+        return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward<LessThan>(comp));
+    }
+
+    //------------------------------------------------------------------------------
+    // <algorithm> Lexicographical Comparisons
+    //------------------------------------------------------------------------------
+
+    // c_lexicographical_compare()
+    //
+    // Container-based version of the <algorithm> `std::lexicographical_compare()`
+    // function to lexicographically compare (e.g. sort words alphabetically) two
+    // container sequences. The comparison is performed using `operator<`. Note
+    // that capital letters ("A-Z") have ASCII values less than lowercase letters
+    // ("a-z").
+    template <typename Sequence1, typename Sequence2>
+    bool c_lexicographical_compare(const Sequence1& sequence1, const Sequence2& sequence2)
+    {
+        return std::lexicographical_compare(
+            container_algorithm_internal::c_begin(sequence1),
+            container_algorithm_internal::c_end(sequence1),
+            container_algorithm_internal::c_begin(sequence2),
+            container_algorithm_internal::c_end(sequence2)
+        );
+    }
+
+    // Overload of c_lexicographical_compare() for performing a lexicographical
+    // comparison using a `comp` operator instead of `operator<`.
+    template <typename Sequence1, typename Sequence2, typename LessThan>
+    bool c_lexicographical_compare(const Sequence1& sequence1, const Sequence2& sequence2, LessThan&& comp)
+    {
+        return std::lexicographical_compare(
+            container_algorithm_internal::c_begin(sequence1),
+            container_algorithm_internal::c_end(sequence1),
+            container_algorithm_internal::c_begin(sequence2),
+            container_algorithm_internal::c_end(sequence2),
+            std::forward<LessThan>(comp)
+        );
+    }
+
+    // c_next_permutation()
+    //
+    // Container-based version of the <algorithm> `std::next_permutation()` function
+    // to rearrange a container's elements into the next lexicographically greater
+    // permutation.
+    template <typename C>
+    bool c_next_permutation(C& c)
+    {
+        return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c));
+    }
+
+    // Overload of c_next_permutation() for performing a lexicographical
+    // comparison using a `comp` operator instead of `operator<`.
+    template <typename C, typename LessThan>
+    bool c_next_permutation(C& c, LessThan&& comp)
+    {
+        return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward<LessThan>(comp));
+    }
+
+    // c_prev_permutation()
+    //
+    // Container-based version of the <algorithm> `std::prev_permutation()` function
+    // to rearrange a container's elements into the next lexicographically lesser
+    // permutation.
+    template <typename C>
+    bool c_prev_permutation(C& c)
+    {
+        return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c));
+    }
+
+    // Overload of c_prev_permutation() for performing a lexicographical
+    // comparison using a `comp` operator instead of `operator<`.
+    template <typename C, typename LessThan>
+    bool c_prev_permutation(C& c, LessThan&& comp)
+    {
+        return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward<LessThan>(comp));
+    }
+
+    //------------------------------------------------------------------------------
+    // <numeric> algorithms
+    //------------------------------------------------------------------------------
+
+    // c_iota()
+    //
+    // Container-based version of the <numeric> `std::iota()` function
+    // to compute successive values of `value`, as if incremented with `++value`
+    // after each element is written. and write them to the container.
+    template <typename Sequence, typename T>
+    void c_iota(Sequence& sequence, const T& value)
+    {
+        std::iota(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value);
+    }
+
+    // c_accumulate()
+    //
+    // Container-based version of the <numeric> `std::accumulate()` function
+    // to accumulate the element values of a container to `init` and return that
+    // accumulation by value.
+    //
+    // Note: Due to a language technicality this function has return type
+    // absl::decay_t<T>. As a user of this function you can casually read
+    // this as "returns T by value" and assume it does the right thing.
+    template <typename Sequence, typename T>
+    decay_t<T> c_accumulate(const Sequence& sequence, T&& init)
+    {
+        return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<T>(init));
+    }
+
+    // Overload of c_accumulate() for using a binary operations other than
+    // addition for computing the accumulation.
+    template <typename Sequence, typename T, typename BinaryOp>
+    decay_t<T> c_accumulate(const Sequence& sequence, T&& init, BinaryOp&& binary_op)
+    {
+        return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward<T>(init), std::forward<BinaryOp>(binary_op));
+    }
+
+    // c_inner_product()
+    //
+    // Container-based version of the <numeric> `std::inner_product()` function
+    // to compute the cumulative inner product of container element pairs.
+    //
+    // Note: Due to a language technicality this function has return type
+    // absl::decay_t<T>. As a user of this function you can casually read
+    // this as "returns T by value" and assume it does the right thing.
+ template + decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, T&& sum) + { + return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), std::forward(sum)); + } + + // Overload of c_inner_product() for using binary operations other than + // `operator+` (for computing the accumulation) and `operator*` (for computing + // the product between the two container's element pair). + template + decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) + { + return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), std::forward(sum), std::forward(op1), std::forward(op2)); + } + + // c_adjacent_difference() + // + // Container-based version of the `std::adjacent_difference()` + // function to compute the difference between each element and the one preceding + // it and write it to an iterator. + template + OutputIt c_adjacent_difference(const InputSequence& input, OutputIt output_first) + { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); + } + + // Overload of c_adjacent_difference() for using a binary operation other than + // subtraction to compute the adjacent difference. + template + OutputIt c_adjacent_difference(const InputSequence& input, OutputIt output_first, BinaryOp&& op) + { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); + } + + // c_partial_sum() + // + // Container-based version of the `std::partial_sum()` function + // to compute the partial sum of the elements in a sequence and write them + // to an iterator. 
The partial sum is the sum of all element values so far in + // the sequence. + template + OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) + { + return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); + } + + // Overload of c_partial_sum() for using a binary operation other than addition + // to compute the "partial sum". + template + OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, BinaryOp&& op) + { + return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_ALGORITHM_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/attributes.h b/CAPI/cpp/grpc/include/absl/base/attributes.h new file mode 100644 index 00000000..f7f0edd2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/attributes.h @@ -0,0 +1,834 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This header file defines macros for declaring attributes for functions, +// types, and variables. +// +// These macros are used within Abseil and allow the compiler to optimize, where +// applicable, certain function calls. +// +// Most macros here are exposing GCC or Clang features, and are stubbed out for +// other compilers. 
+// +// GCC attributes documentation: +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html +// +// Most attributes in this file are already supported by GCC 4.7. However, some +// of them are not supported in older version of Clang. Thus, we check +// `__has_attribute()` first. If the check fails, we check if we are on GCC and +// assume the attribute exists on GCC (which is verified on GCC 4.7). + +#ifndef ABSL_BASE_ATTRIBUTES_H_ +#define ABSL_BASE_ATTRIBUTES_H_ + +#include "absl/base/config.h" + +// ABSL_HAVE_ATTRIBUTE +// +// A function-like feature checking macro that is a wrapper around +// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a +// nonzero constant integer if the attribute is supported or 0 if not. +// +// It evaluates to zero if `__has_attribute` is not defined by the compiler. +// +// GCC: https://gcc.gnu.org/gcc-5/changes.html +// Clang: https://clang.llvm.org/docs/LanguageExtensions.html +#ifdef __has_attribute +#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x) +#else +#define ABSL_HAVE_ATTRIBUTE(x) 0 +#endif + +// ABSL_HAVE_CPP_ATTRIBUTE +// +// A function-like feature checking macro that accepts C++11 style attributes. +// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 +// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't +// find `__has_cpp_attribute`, will evaluate to 0. +#if defined(__cplusplus) && defined(__has_cpp_attribute) +// NOTE: requiring __cplusplus above should not be necessary, but +// works around https://bugs.llvm.org/show_bug.cgi?id=23435. 
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0
+#endif
+
+// -----------------------------------------------------------------------------
+// Function Attributes
+// -----------------------------------------------------------------------------
+//
+// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+// Clang: https://clang.llvm.org/docs/AttributeReference.html
+
+// ABSL_PRINTF_ATTRIBUTE
+// ABSL_SCANF_ATTRIBUTE
+//
+// Tells the compiler to perform `printf` format string checking if the
+// compiler supports it; see the 'format' attribute in
+// <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html>.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+    __attribute__((__format__(__printf__, string_index, first_to_check)))
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
+    __attribute__((__format__(__scanf__, string_index, first_to_check)))
+#else
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
+
+// ABSL_ATTRIBUTE_ALWAYS_INLINE
+// ABSL_ATTRIBUTE_NOINLINE
+//
+// Forces functions to either inline or not inline. Introduced in gcc 3.1.
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) +#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1 +#else +#define ABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + +#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline)) +#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1 +#else +#define ABSL_ATTRIBUTE_NOINLINE +#endif + +// ABSL_ATTRIBUTE_NO_TAIL_CALL +// +// Prevents the compiler from optimizing away stack frames for functions which +// end in a call to another function. +#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) +#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL \ + __attribute__((optimize("no-optimize-sibling-calls"))) +#else +#define ABSL_ATTRIBUTE_NO_TAIL_CALL +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 +#endif + +// ABSL_ATTRIBUTE_WEAK +// +// Tags a function as weak for the purposes of compilation and linking. +// Weak attributes did not work properly in LLVM's Windows backend before +// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// for further information. +// The MinGW compiler doesn't complain about the weak attribute until the link +// step, presumably because Windows doesn't use ELF binaries. 
+#if (ABSL_HAVE_ATTRIBUTE(weak) || (defined(__GNUC__) && !defined(__clang__))) && \ + (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ + !defined(__MINGW32__) +#undef ABSL_ATTRIBUTE_WEAK +#define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) +#define ABSL_HAVE_ATTRIBUTE_WEAK 1 +#else +#define ABSL_ATTRIBUTE_WEAK +#define ABSL_HAVE_ATTRIBUTE_WEAK 0 +#endif + +// ABSL_ATTRIBUTE_NONNULL +// +// Tells the compiler either (a) that a particular function parameter +// should be a non-null pointer, or (b) that all pointer arguments should +// be non-null. +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +// +// Args are indexed starting at 1. +// +// For non-static class member functions, the implicit `this` argument +// is arg 1, and the first explicit argument is arg 2. For static class member +// functions, there is no implicit `this`, and the first explicit argument is +// arg 1. +// +// Example: +// +// /* arg_a cannot be null, but arg_b can */ +// void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1); +// +// class C { +// /* arg_a cannot be null, but arg_b can */ +// void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2); +// +// /* arg_a cannot be null, but arg_b can */ +// static void StaticMethod(void* arg_a, void* arg_b) +// ABSL_ATTRIBUTE_NONNULL(1); +// }; +// +// If no arguments are provided, then all pointer arguments should be non-null. +// +// /* No pointer arguments may be null. */ +// void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL(); +// +// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but +// ABSL_ATTRIBUTE_NONNULL does not. +#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index))) +#else +#define ABSL_ATTRIBUTE_NONNULL(...) 
+#endif + +// ABSL_ATTRIBUTE_NORETURN +// +// Tells the compiler that a given function never returns. +#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn)) +#elif defined(_MSC_VER) +#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn) +#else +#define ABSL_ATTRIBUTE_NORETURN +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +// +// Tells the AddressSanitizer (or other memory testing tools) to ignore a given +// function. Useful for cases when a function reads random locations on stack, +// calls _exit from a cloned subprocess, deliberately accesses buffer +// out of bounds or does other scary things with memory. +// NOTE: GCC supports AddressSanitizer(asan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if defined(ABSL_HAVE_ADDRESS_SANITIZER) && \ + ABSL_HAVE_ATTRIBUTE(no_sanitize_address) +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) && defined(_MSC_VER) && \ + _MSC_VER >= 1928 +// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address) +#elif defined(ABSL_HAVE_HWADDRESS_SANITIZER) && ABSL_HAVE_ATTRIBUTE(no_sanitize) +// HWAddressSanitizer is a sanitizer similar to AddressSanitizer, which uses CPU +// features to detect similar bugs with less CPU and memory overhead. +// NOTE: GCC supports HWAddressSanitizer(hwasan) since 11. +// https://gcc.gnu.org/gcc-11/changes.html +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS \ + __attribute__((no_sanitize("hwaddress"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +// +// Tells the MemorySanitizer to relax the handling of a given function. All "Use +// of uninitialized value" warnings from such functions will be suppressed, and +// all values loaded from memory will be considered fully initialized. 
This +// attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute +// above, but deals with initialized-ness rather than addressability issues. +// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory) +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +// +// Tells the ThreadSanitizer to not instrument a given function. +// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread) +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +// +// Tells the UndefinedSanitizer to ignore a given function. Useful for cases +// where certain behavior (eg. division by zero) is being used intentionally. +// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. +// https://gcc.gnu.org/gcc-4.9/changes.html +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined) +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize_undefined)) +#elif ABSL_HAVE_ATTRIBUTE(no_sanitize) +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize("undefined"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_CFI +// +// Tells the ControlFlowIntegrity sanitizer to not instrument a given function. +// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. +#if ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__llvm__) +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +// +// Tells the SafeStack to not instrument a given function. 
+// See https://clang.llvm.org/docs/SafeStack.html for details. +#if ABSL_HAVE_ATTRIBUTE(no_sanitize) +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ + __attribute__((no_sanitize("safe-stack"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +#endif + +// ABSL_ATTRIBUTE_RETURNS_NONNULL +// +// Tells the compiler that a particular function never returns a null pointer. +#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) +#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) +#else +#define ABSL_ATTRIBUTE_RETURNS_NONNULL +#endif + +// ABSL_HAVE_ATTRIBUTE_SECTION +// +// Indicates whether labeled sections are supported. Weak symbol support is +// a prerequisite. Labeled sections are not supported on Darwin/iOS. +#ifdef ABSL_HAVE_ATTRIBUTE_SECTION +#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set +#elif (ABSL_HAVE_ATTRIBUTE(section) || (defined(__GNUC__) && !defined(__clang__))) && \ + !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK +#define ABSL_HAVE_ATTRIBUTE_SECTION 1 + +// ABSL_ATTRIBUTE_SECTION +// +// Tells the compiler/linker to put a given function into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. +// This functionality is supported by GNU linker. Any function annotated with +// `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into +// whatever section its caller is placed into. +// +#ifndef ABSL_ATTRIBUTE_SECTION +#define ABSL_ATTRIBUTE_SECTION(name) \ + __attribute__((section(#name))) __attribute__((noinline)) +#endif + +// ABSL_ATTRIBUTE_SECTION_VARIABLE +// +// Tells the compiler/linker to put a given variable into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. +// This functionality is supported by GNU linker. 
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#ifdef _AIX
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// pseudo op which includes an additional integer as part of its syntax indicating
+// alignment. If data falls under different alignments then you might get a
+// compilation error indicating a `Section type conflict`.
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#else
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
+#endif
+#endif
+
+// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+//
+// A weak section declaration to be used as a global declaration
+// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
+// even without functions with ABSL_ATTRIBUTE_SECTION(name).
+// ABSL_DEFINE_ATTRIBUTE_SECTION should be in exactly one file; it's
+// a no-op on ELF but not on Mach-O.
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+    extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+    extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_START
+//
+// Returns `void*` pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
+// link.
+// +#define ABSL_ATTRIBUTE_SECTION_START(name) \ + (reinterpret_cast(__start_##name)) +#define ABSL_ATTRIBUTE_SECTION_STOP(name) \ + (reinterpret_cast(__stop_##name)) + +#else // !ABSL_HAVE_ATTRIBUTE_SECTION + +#define ABSL_HAVE_ATTRIBUTE_SECTION 0 + +// provide dummy definitions +#define ABSL_ATTRIBUTE_SECTION(name) +#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) +#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) +#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) +#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) +#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast(0)) +#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast(0)) + +#endif // ABSL_ATTRIBUTE_SECTION + +// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +// +// Support for aligning the stack on 32-bit x86. +#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \ + (defined(__GNUC__) && !defined(__clang__)) +#if defined(__i386__) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \ + __attribute__((force_align_arg_pointer)) +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#elif defined(__x86_64__) +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#else // !__i386__ && !__x86_64 +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#endif // __i386__ +#else +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#endif + +// ABSL_MUST_USE_RESULT +// +// Tells the compiler to warn about unused results. +// +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard `[[nodiscard]]` directly over this macro. +// +// When annotating a function, it must appear as the first part of the +// declaration or definition. The compiler will warn if the return value from +// such a function is unused: +// +// ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket(); +// AllocateSprocket(); // Triggers a warning. 
+// +// When annotating a class, it is equivalent to annotating every function which +// returns an instance. +// +// class ABSL_MUST_USE_RESULT Sprocket {}; +// Sprocket(); // Triggers a warning. +// +// Sprocket MakeSprocket(); +// MakeSprocket(); // Triggers a warning. +// +// Note that references and pointers are not instances: +// +// Sprocket* SprocketPointer(); +// SprocketPointer(); // Does *not* trigger a warning. +// +// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result +// warning. For that, warn_unused_result is used only for clang but not for gcc. +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 +// +// Note: past advice was to place the macro after the argument list. +// +// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is +// compliant with the stricter [[nodiscard]]. +#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) +#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) +#else +#define ABSL_MUST_USE_RESULT +#endif + +// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD +// +// Tells GCC that a function is hot or cold. GCC can use this information to +// improve static analysis, i.e. a conditional branch to a cold function +// is likely to be not-taken. +// This annotation is used for function declarations. +// +// Example: +// +// int foo() ABSL_ATTRIBUTE_HOT; +#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_HOT __attribute__((hot)) +#else +#define ABSL_ATTRIBUTE_HOT +#endif + +#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_COLD __attribute__((cold)) +#else +#define ABSL_ATTRIBUTE_COLD +#endif + +// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS +// +// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT +// macro used as an attribute to mark functions that must always or never be +// instrumented by XRay. 
Currently, this is only supported in Clang/LLVM. +// +// For reference on the LLVM XRay instrumentation, see +// http://llvm.org/docs/XRay.html. +// +// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration +// will always get the XRay instrumentation sleds. These sleds may introduce +// some binary size and runtime overhead and must be used sparingly. +// +// These attributes only take effect when the following conditions are met: +// +// * The file/target is built in at least C++11 mode, with a Clang compiler +// that supports XRay attributes. +// * The file/target is built with the -fxray-instrument flag set for the +// Clang/LLVM compiler. +// * The function is defined in the translation unit (the compiler honors the +// attribute in either the definition or the declaration, and must match). +// +// There are cases when, even when building with XRay instrumentation, users +// might want to control specifically which functions are instrumented for a +// particular build using special-case lists provided to the compiler. These +// special case lists are provided to Clang via the +// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The +// attributes in source take precedence over these special-case lists. +// +// To disable the XRay attributes at build-time, users may define +// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific +// packages/targets, as this may lead to conflicting definitions of functions at +// link-time. 
+// +// XRay isn't currently supported on Android: +// https://github.com/android/ndk/issues/368 +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \ + !defined(ABSL_NO_XRAY_ATTRIBUTES) && !defined(__ANDROID__) +#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]] +#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) +#define ABSL_XRAY_LOG_ARGS(N) \ + [[clang::xray_always_instrument, clang::xray_log_args(N)]] +#else +#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] +#endif +#else +#define ABSL_XRAY_ALWAYS_INSTRUMENT +#define ABSL_XRAY_NEVER_INSTRUMENT +#define ABSL_XRAY_LOG_ARGS(N) +#endif + +// ABSL_ATTRIBUTE_REINITIALIZES +// +// Indicates that a member function reinitializes the entire object to a known +// state, independent of the previous state of the object. +// +// The clang-tidy check bugprone-use-after-move allows member functions marked +// with this attribute to be called on objects that have been moved from; +// without the attribute, this would result in a use-after-move warning. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) +#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] +#else +#define ABSL_ATTRIBUTE_REINITIALIZES +#endif + +// ----------------------------------------------------------------------------- +// Variable Attributes +// ----------------------------------------------------------------------------- + +// ABSL_ATTRIBUTE_UNUSED +// +// Prevents the compiler from complaining about variables that appear unused. +// +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard '[[maybe_unused]]' directly over this macro. +// +// Due to differences in positioning requirements between the old, compiler +// specific __attribute__ syntax and the now standard [[maybe_unused]], this +// macro does not attempt to take advantage of '[[maybe_unused]]'. 
+#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) +#undef ABSL_ATTRIBUTE_UNUSED +#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) +#else +#define ABSL_ATTRIBUTE_UNUSED +#endif + +// ABSL_ATTRIBUTE_INITIAL_EXEC +// +// Tells the compiler to use "initial-exec" mode for a thread-local variable. +// See http://people.redhat.com/drepper/tls.pdf for the gory details. +#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec"))) +#else +#define ABSL_ATTRIBUTE_INITIAL_EXEC +#endif + +// ABSL_ATTRIBUTE_PACKED +// +// Instructs the compiler not to use natural alignment for a tagged data +// structure, but instead to reduce its alignment to 1. +// +// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing +// so can cause atomic variables to be mis-aligned and silently violate +// atomicity on x86. +// +// This attribute can either be applied to members of a structure or to a +// structure in its entirety. Applying this attribute (judiciously) to a +// structure in its entirety to optimize the memory footprint of very +// commonly-used structs is fine. Do not apply this attribute to a structure in +// its entirety if the purpose is to control the offsets of the members in the +// structure. Instead, apply this attribute only to structure members that need +// it. +// +// When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the +// natural alignment of structure members not annotated is preserved. Aligned +// member accesses are faster than non-aligned member accesses even if the +// targeted microprocessor supports non-aligned accesses. 
+#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__)) +#else +#define ABSL_ATTRIBUTE_PACKED +#endif + +// ABSL_ATTRIBUTE_FUNC_ALIGN +// +// Tells the compiler to align the function start at least to certain +// alignment boundary +#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes))) +#else +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) +#endif + +// ABSL_FALLTHROUGH_INTENDED +// +// Annotates implicit fall-through between switch labels, allowing a case to +// indicate intentional fallthrough and turn off warnings about any lack of a +// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by +// a semicolon and can be used in most places where `break` can, provided that +// no statements exist between it and the next switch label. +// +// Example: +// +// switch (x) { +// case 40: +// case 41: +// if (truth_is_out_there) { +// ++x; +// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations +// // in comments +// } else { +// return x; +// } +// case 42: +// ... +// +// Notes: When supported, GCC and Clang can issue a warning on switch labels +// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See +// clang documentation on language extensions for details: +// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +// +// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has +// no effect on diagnostics. In any case this macro has no effect on runtime +// behavior and performance of code. + +#ifdef ABSL_FALLTHROUGH_INTENDED +#error "ABSL_FALLTHROUGH_INTENDED should not be defined." 
+#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#else
+#define ABSL_FALLTHROUGH_INTENDED \
+    do \
+    { \
+    } while (0)
+#endif
+
+// ABSL_DEPRECATED()
+//
+// Marks a deprecated class, struct, enum, function, method and variable
+// declarations. The macro argument is used as a custom diagnostic message (e.g.
+// suggestion of a better alternative).
+//
+// For code or headers that are assured to only build with C++14 and up, prefer
+// just using the standard `[[deprecated("message")]]` directly over this macro.
+//
+// Examples:
+//
+// class ABSL_DEPRECATED("Use Bar instead") Foo {...};
+//
+// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...}
+//
+// template <typename T>
+// ABSL_DEPRECATED("Use DoThat() instead")
+// void DoThis();
+//
+// enum FooEnum {
+// kBar ABSL_DEPRECATED("Use kBaz instead"),
+// };
+//
+// Every usage of a deprecated entity will trigger a warning when compiled with
+// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain
+// turns this warning off by default, instead relying on clang-tidy to report
+// new uses of deprecated code.
+#if ABSL_HAVE_ATTRIBUTE(deprecated)
+#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
+#else
+#define ABSL_DEPRECATED(message)
+#endif
+
+// When deprecating Abseil code, it is sometimes necessary to turn off the
+// warning within Abseil, until the deprecated code is actually removed. The
+// deprecated code can be surrounded with these directives to achieve that
+// result.
+// +// class ABSL_DEPRECATED("Use Bar instead") Foo; +// +// ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING +// Baz ComputeBazFromFoo(Foo f); +// ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING +#if defined(__GNUC__) || defined(__clang__) +// Clang also supports these GCC pragmas. +#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \ + _Pragma("GCC diagnostic pop") +#else +#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING +#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING +#endif // defined(__GNUC__) || defined(__clang__) + +// ABSL_CONST_INIT +// +// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will +// not compile (on supported platforms) unless the variable has a constant +// initializer. This is useful for variables with static and thread storage +// duration, because it guarantees that they will not suffer from the so-called +// "static init order fiasco". +// +// This attribute must be placed on the initializing declaration of the +// variable. Some compilers will give a -Wmissing-constinit warning when this +// attribute is placed on some other declaration but missing from the +// initializing declaration. +// +// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can +// also be used in a non-initializing declaration to tell the compiler that a +// variable is already initialized, reducing overhead that would otherwise be +// incurred by a hidden guard variable. Thus annotating all declarations with +// this attribute is recommended to potentially enhance optimization. 
+// +// Example: +// +// class MyClass { +// public: +// ABSL_CONST_INIT static MyType my_var; +// }; +// +// ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...); +// +// For code or headers that are assured to only build with C++20 and up, prefer +// just using the standard `constinit` keyword directly over this macro. +// +// Note that this attribute is redundant if the variable is declared constexpr. +#if defined(__cpp_constinit) && __cpp_constinit >= 201907L +#define ABSL_CONST_INIT constinit +#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#define ABSL_CONST_INIT [[clang::require_constant_initialization]] +#else +#define ABSL_CONST_INIT +#endif + +// These annotations are not available yet due to fear of breaking code. +#define ABSL_ATTRIBUTE_PURE_FUNCTION +#define ABSL_ATTRIBUTE_CONST_FUNCTION + +// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function +// parameter or implicit object parameter is retained by the return value of the +// annotated function (or, for a parameter of a constructor, in the value of the +// constructed object). This attribute causes warnings to be produced if a +// temporary object does not live long enough. +// +// When applied to a reference parameter, the referenced object is assumed to be +// retained by the return value of the function. When applied to a non-reference +// parameter (for example, a pointer or a class type), all temporaries +// referenced by the parameter are assumed to be retained by the return value of +// the function. 
+// +// See also the upstream documentation: +// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]] +#elif ABSL_HAVE_ATTRIBUTE(lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound)) +#else +#define ABSL_ATTRIBUTE_LIFETIME_BOUND +#endif + +// ABSL_ATTRIBUTE_TRIVIAL_ABI +// Indicates that a type is "trivially relocatable" -- meaning it can be +// relocated without invoking the constructor/destructor, using a form of move +// elision. +// +// From a memory safety point of view, putting aside destructor ordering, it's +// safe to apply ABSL_ATTRIBUTE_TRIVIAL_ABI if an object's location +// can change over the course of its lifetime: if a constructor can be run one +// place, and then the object magically teleports to another place where some +// methods are run, and then the object teleports to yet another place where it +// is destroyed. This is notably not true for self-referential types, where the +// move-constructor must keep the self-reference up to date. If the type changed +// location without invoking the move constructor, it would have a dangling +// self-reference. +// +// The use of this teleporting machinery means that the number of paired +// move/destroy operations can change, and so it is a bad idea to apply this to +// a type meant to count the number of moves. +// +// Warning: applying this can, rarely, break callers. Objects passed by value +// will be destroyed at the end of the call, instead of the end of the +// full-expression containing the call. In addition, it changes the ABI +// of functions accepting this type by value (e.g. to pass in registers). 
+// +// See also the upstream documentation: +// https://clang.llvm.org/docs/AttributeReference.html#trivial-abi +// +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::trivial_abi) +#define ABSL_ATTRIBUTE_TRIVIAL_ABI [[clang::trivial_abi]] +#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1 +#elif ABSL_HAVE_ATTRIBUTE(trivial_abi) +#define ABSL_ATTRIBUTE_TRIVIAL_ABI __attribute__((trivial_abi)) +#define ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI 1 +#else +#define ABSL_ATTRIBUTE_TRIVIAL_ABI +#endif + +// ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS +// +// Indicates a data member can be optimized to occupy no space (if it is empty) +// and/or its tail padding can be used for other members. +// +// For code that is assured to only build with C++20 or later, prefer using +// the standard attribute `[[no_unique_address]]` directly instead of this +// macro. +// +// https://devblogs.microsoft.com/cppblog/msvc-cpp20-and-the-std-cpp20-switch/#c20-no_unique_address +// Current versions of MSVC have disabled `[[no_unique_address]]` since it +// breaks ABI compatibility, but offers `[[msvc::no_unique_address]]` for +// situations when it can be assured that it is desired. Since Abseil does not +// claim ABI compatibility in mixed builds, we can offer it unconditionally. +#if defined(_MSC_VER) && _MSC_VER >= 1929 +#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]] +#elif ABSL_HAVE_CPP_ATTRIBUTE(no_unique_address) +#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[no_unique_address]] +#else +#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS +#endif + +#endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/call_once.h b/CAPI/cpp/grpc/include/absl/base/call_once.h new file mode 100644 index 00000000..0f0e6924 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/call_once.h @@ -0,0 +1,234 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: call_once.h +// ----------------------------------------------------------------------------- +// +// This header file provides an Abseil version of `std::call_once` for invoking +// a given function at most once, across all threads. This Abseil version is +// faster than the C++11 version and incorporates the C++17 argument-passing +// fix, so that (for example) non-const references may be passed to the invoked +// function. + +#ifndef ABSL_BASE_CALL_ONCE_H_ +#define ABSL_BASE_CALL_ONCE_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/spinlock_wait.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class once_flag; + + namespace base_internal + { + std::atomic* ControlWord(absl::once_flag* flag); + } // namespace base_internal + + // call_once() + // + // For all invocations using a given `once_flag`, invokes a given `fn` exactly + // once across all threads. 
The first call to `call_once()` with a particular + // `once_flag` argument (that does not throw an exception) will run the + // specified function with the provided `args`; other calls with the same + // `once_flag` argument will not run the function, but will wait + // for the provided function to finish running (if it is still running). + // + // This mechanism provides a safe, simple, and fast mechanism for one-time + // initialization in a multi-threaded process. + // + // Example: + // + // class MyInitClass { + // public: + // ... + // mutable absl::once_flag once_; + // + // MyInitClass* init() const { + // absl::call_once(once_, &MyInitClass::Init, this); + // return ptr_; + // } + // + template + void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args); + + // once_flag + // + // Objects of this type are used to distinguish calls to `call_once()` and + // ensure the provided function is only invoked once across all threads. This + // type is not copyable or movable. However, it has a `constexpr` + // constructor, and is safe to use as a namespace-scoped global variable. + class once_flag + { + public: + constexpr once_flag() : + control_(0) + { + } + once_flag(const once_flag&) = delete; + once_flag& operator=(const once_flag&) = delete; + + private: + friend std::atomic* base_internal::ControlWord(once_flag* flag); + std::atomic control_; + }; + + //------------------------------------------------------------------------------ + // End of public interfaces. + // Implementation details follow. + //------------------------------------------------------------------------------ + + namespace base_internal + { + + // Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to + // initialize entities used by the scheduler implementation. + template + void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args); + + // Disables scheduling while on stack when scheduling mode is non-cooperative. 
+ // No effect for cooperative scheduling modes. + class SchedulingHelper + { + public: + explicit SchedulingHelper(base_internal::SchedulingMode mode) : + mode_(mode) + { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) + { + guard_result_ = base_internal::SchedulingGuard::DisableRescheduling(); + } + } + + ~SchedulingHelper() + { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) + { + base_internal::SchedulingGuard::EnableRescheduling(guard_result_); + } + } + + private: + base_internal::SchedulingMode mode_; + bool guard_result_ = false; + }; + + // Bit patterns for call_once state machine values. Internal implementation + // detail, not for use by clients. + // + // The bit patterns are arbitrarily chosen from unlikely values, to aid in + // debugging. However, kOnceInit must be 0, so that a zero-initialized + // once_flag will be valid for immediate use. + enum + { + kOnceInit = 0, + kOnceRunning = 0x65C2937B, + kOnceWaiter = 0x05A308D2, + // A very small constant is chosen for kOnceDone so that it fit in a single + // compare with immediate instruction for most common ISAs. This is verified + // for x86, POWER and ARM. + kOnceDone = 221, // Random Number + }; + + template + ABSL_ATTRIBUTE_NOINLINE void CallOnceImpl(std::atomic* control, base_internal::SchedulingMode scheduling_mode, Callable&& fn, Args&&... args) + { +#ifndef NDEBUG + { + uint32_t old_control = control->load(std::memory_order_relaxed); + if (old_control != kOnceInit && + old_control != kOnceRunning && + old_control != kOnceWaiter && + old_control != kOnceDone) + { + ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx", + static_cast(old_control)); // NOLINT + } + } +#endif // NDEBUG + static const base_internal::SpinLockWaitTransition trans[] = { + {kOnceInit, kOnceRunning, true}, + {kOnceRunning, kOnceWaiter, false}, + {kOnceDone, kOnceDone, true}}; + + // Must do this before potentially modifying control word's state. 
+ base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode); + // Short circuit the simplest case to avoid procedure call overhead. + // The base_internal::SpinLockWait() call returns either kOnceInit or + // kOnceDone. If it returns kOnceDone, it must have loaded the control word + // with std::memory_order_acquire and seen a value of kOnceDone. + uint32_t old_control = kOnceInit; + if (control->compare_exchange_strong(old_control, kOnceRunning, std::memory_order_relaxed) || + base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, scheduling_mode) == kOnceInit) + { + base_internal::invoke(std::forward(fn), std::forward(args)...); + old_control = + control->exchange(base_internal::kOnceDone, std::memory_order_release); + if (old_control == base_internal::kOnceWaiter) + { + base_internal::SpinLockWake(control, true); + } + } // else *control is already kOnceDone + } + + inline std::atomic* ControlWord(once_flag* flag) + { + return &flag->control_; + } + + template + void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) + { + std::atomic* once = base_internal::ControlWord(flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) + { + base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY, std::forward(fn), std::forward(args)...); + } + } + + } // namespace base_internal + + template + void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) + { + std::atomic* once = base_internal::ControlWord(&flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) + { + base_internal::CallOnceImpl( + once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL, std::forward(fn), std::forward(args)... 
+ ); + } + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CALL_ONCE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/casts.h b/CAPI/cpp/grpc/include/absl/base/casts.h new file mode 100644 index 00000000..a83c389c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/casts.h @@ -0,0 +1,182 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: casts.h +// ----------------------------------------------------------------------------- +// +// This header file defines casting templates to fit use cases not covered by +// the standard casts provided in the C++ standard. As with all cast operations, +// use these with caution and only if alternatives do not exist. + +#ifndef ABSL_BASE_CASTS_H_ +#define ABSL_BASE_CASTS_H_ + +#include +#include +#include +#include + +#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L +#include // For std::bit_cast. 
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + +#include "absl/base/internal/identity.h" +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // implicit_cast() + // + // Performs an implicit conversion between types following the language + // rules for implicit conversion; if an implicit conversion is otherwise + // allowed by the language in the given context, this function performs such an + // implicit conversion. + // + // Example: + // + // // If the context allows implicit conversion: + // From from; + // To to = from; + // + // // Such code can be replaced by: + // implicit_cast(from); + // + // An `implicit_cast()` may also be used to annotate numeric type conversions + // that, although safe, may produce compiler warnings (such as `long` to `int`). + // Additionally, an `implicit_cast()` is also useful within return statements to + // indicate a specific implicit conversion is being undertaken. + // + // Example: + // + // return implicit_cast(size_in_bytes) / capacity_; + // + // Annotating code with `implicit_cast()` allows you to explicitly select + // particular overloads and template instantiations, while providing a safer + // cast than `reinterpret_cast()` or `static_cast()`. + // + // Additionally, an `implicit_cast()` can be used to allow upcasting within a + // type hierarchy where incorrect use of `static_cast()` could accidentally + // allow downcasting. + // + // Finally, an `implicit_cast()` can be used to perform implicit conversions + // from unrelated types that otherwise couldn't be implicitly cast directly; + // C++ will normally only implicitly cast "one step" in such conversions. 
+ // + // That is, if C is a type which can be implicitly converted to B, with B being + // a type that can be implicitly converted to A, an `implicit_cast()` can be + // used to convert C to B (which the compiler can then implicitly convert to A + // using language rules). + // + // Example: + // + // // Assume an object C is convertible to B, which is implicitly convertible + // // to A + // A a = implicit_cast(C); + // + // Such implicit cast chaining may be useful within template logic. + template + constexpr To implicit_cast(typename absl::internal::identity_t to) + { + return to; + } + +// bit_cast() +// +// Creates a value of the new type `Dest` whose representation is the same as +// that of the argument, which is of (deduced) type `Source` (a "bitwise cast"; +// every bit in the value representation of the result is equal to the +// corresponding bit in the object representation of the source). Source and +// destination types must be of the same size, and both types must be trivially +// copyable. +// +// As with most casts, use with caution. A `bit_cast()` might be needed when you +// need to treat a value as the value of some other type, for example, to access +// the individual bits of an object which are not normally accessible through +// the object's type, such as for working with the binary representation of a +// floating point value: +// +// float f = 3.14159265358979; +// int i = bit_cast(f); +// // i = 0x40490fdb +// +// Reinterpreting and accessing a value directly as a different type (as shown +// below) usually results in undefined behavior. +// +// Example: +// +// // WRONG +// float f = 3.14159265358979; +// int i = reinterpret_cast(f); // Wrong +// int j = *reinterpret_cast(&f); // Equally wrong +// int k = *bit_cast(&f); // Equally wrong +// +// Reinterpret-casting results in undefined behavior according to the ISO C++ +// specification, section [basic.lval]. 
Roughly, this section says: if an object +// in memory has one type, and a program accesses it with a different type, the +// result is undefined behavior for most "different type". +// +// Using bit_cast on a pointer and then dereferencing it is no better than using +// reinterpret_cast. You should only use bit_cast on the value itself. +// +// Such casting results in type punning: holding an object in memory of one type +// and reading its bits back using a different type. A `bit_cast()` avoids this +// issue by copying the object representation to a new value, which avoids +// introducing this undefined behavior (since the original value is never +// accessed in the wrong way). +// +// The requirements of `absl::bit_cast` are more strict than that of +// `std::bit_cast` unless compiler support is available. Specifically, without +// compiler support, this implementation also requires `Dest` to be +// default-constructible. In C++20, `absl::bit_cast` is replaced by +// `std::bit_cast`. 
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + + using std::bit_cast; + +#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + + template< + typename Dest, + typename Source, + typename std::enable_if::value && std::is_trivially_copyable::value +#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast) + && std::is_default_constructible::value +#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast) + , + int>::type = 0> +#if ABSL_HAVE_BUILTIN(__builtin_bit_cast) + inline constexpr Dest bit_cast(const Source& source) + { + return __builtin_bit_cast(Dest, source); + } +#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast) + inline Dest bit_cast(const Source& source) + { + Dest dest; + memcpy(static_cast(std::addressof(dest)), static_cast(std::addressof(source)), sizeof(dest)); + return dest; + } +#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast) + +#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CASTS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/config.h b/CAPI/cpp/grpc/include/absl/base/config.h new file mode 100644 index 00000000..8ee265ac --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/config.h @@ -0,0 +1,932 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: config.h +// ----------------------------------------------------------------------------- +// +// This header file defines a set of macros for checking the presence of +// important compiler and platform features. Such macros can be used to +// produce portable code by parameterizing compilation based on the presence or +// lack of a given feature. +// +// We define a "feature" as some interface we wish to program to: for example, +// a library function or system call. A value of `1` indicates support for +// that feature; any other value indicates the feature support is undefined. +// +// Example: +// +// Suppose a programmer wants to write a program that uses the 'mmap()' system +// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to +// selectively include the `mmap.h` header and bracket code using that feature +// in the macro: +// +// #include "absl/base/config.h" +// +// #ifdef ABSL_HAVE_MMAP +// #include "sys/mman.h" +// #endif //ABSL_HAVE_MMAP +// +// ... +// #ifdef ABSL_HAVE_MMAP +// void *ptr = mmap(...); +// ... +// #endif // ABSL_HAVE_MMAP + +#ifndef ABSL_BASE_CONFIG_H_ +#define ABSL_BASE_CONFIG_H_ + +// Included for the __GLIBC__ macro (or similar macros on other systems). +#include + +#ifdef __cplusplus +// Included for __GLIBCXX__, _LIBCPP_VERSION +#include +#endif // __cplusplus + +// ABSL_INTERNAL_CPLUSPLUS_LANG +// +// MSVC does not set the value of __cplusplus correctly, but instead uses +// _MSVC_LANG as a stand-in. +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +// +// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at +// times, for example: +// https://github.com/microsoft/vscode-cpptools/issues/1770 +// https://reviews.llvm.org/D70996 +// +// For this reason, this symbol is considered INTERNAL and code outside of +// Abseil must not use it. 
+#if defined(_MSVC_LANG) +#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG +#elif defined(__cplusplus) +#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus +#endif + +#if defined(__APPLE__) +// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, +// __IPHONE_8_0. +#include +#include +#endif + +#include "absl/base/options.h" +#include "absl/base/policy_checks.h" + +// Abseil long-term support (LTS) releases will define +// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the +// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the +// integer representing the patch-level for that release. +// +// For example, for LTS release version "20300401.2", this would give us +// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2 +// +// These symbols will not be defined in non-LTS code. +// +// Abseil recommends that clients live-at-head. Therefore, if you are using +// these symbols to assert a minimum version requirement, we recommend you do it +// as +// +// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401 +// #error Project foo requires Abseil LTS version >= 20300401 +// #endif +// +// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes +// live-at-head clients from the minimum version assertion. +// +// See https://abseil.io/about/releases for more information on Abseil release +// management. +// +// LTS releases can be obtained from +// https://github.com/abseil/abseil-cpp/releases. +#define ABSL_LTS_RELEASE_VERSION 20230802 +#define ABSL_LTS_RELEASE_PATCH_LEVEL 1 + +// Helper macro to convert a CPP variable to a string literal. 
+#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x +#define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x) + +// ----------------------------------------------------------------------------- +// Abseil namespace annotations +// ----------------------------------------------------------------------------- + +// ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END +// +// An annotation placed at the beginning/end of each `namespace absl` scope. +// This is used to inject an inline namespace. +// +// The proper way to write Abseil code in the `absl` namespace is: +// +// namespace absl { +// ABSL_NAMESPACE_BEGIN +// +// void Foo(); // absl::Foo(). +// +// ABSL_NAMESPACE_END +// } // namespace absl +// +// Users of Abseil should not use these macros, because users of Abseil should +// not write `namespace absl {` in their own code for any reason. (Abseil does +// not support forward declarations of its own types, nor does it support +// user-provided specialization of Abseil templates. Code that violates these +// rules may be broken without warning.) +#if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \ + !defined(ABSL_OPTION_INLINE_NAMESPACE_NAME) +#error options.h is misconfigured. 
+#endif + +// Check that ABSL_OPTION_INLINE_NAMESPACE_NAME is neither "head" nor "" +#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1 + +#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) + +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "not be empty."); +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "be changed to a new, unique identifier name."); + +#endif + +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_NAMESPACE_BEGIN +#define ABSL_NAMESPACE_END +#define ABSL_INTERNAL_C_SYMBOL(x) x +#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 +#define ABSL_NAMESPACE_BEGIN \ + inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME \ + { +#define ABSL_NAMESPACE_END } +#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v +#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) +#define ABSL_INTERNAL_C_SYMBOL(x) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) +#else +#error options.h is misconfigured. +#endif + +// ----------------------------------------------------------------------------- +// Compiler Feature Checks +// ----------------------------------------------------------------------------- + +// ABSL_HAVE_BUILTIN() +// +// Checks whether the compiler supports a Clang Feature Checking Macro, and if +// so, checks whether it supports the provided builtin function "x" where x +// is one of the functions noted in +// https://clang.llvm.org/docs/LanguageExtensions.html +// +// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. 
+// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html +#ifdef __has_builtin +#define ABSL_HAVE_BUILTIN(x) __has_builtin(x) +#else +#define ABSL_HAVE_BUILTIN(x) 0 +#endif + +#ifdef __has_feature +#define ABSL_HAVE_FEATURE(f) __has_feature(f) +#else +#define ABSL_HAVE_FEATURE(f) 0 +#endif + +// Portable check for GCC minimum version: +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ + (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 +#endif + +#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ + (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 +#endif + +// ABSL_HAVE_TLS is defined to 1 when __thread should be supported. +// We assume __thread is supported on Linux or Asylo when compiled with Clang or +// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined. +#ifdef ABSL_HAVE_TLS +#error ABSL_HAVE_TLS cannot be directly set +#elif (defined(__linux__) || defined(__ASYLO__)) && \ + (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#define ABSL_HAVE_TLS 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +// +// Checks whether `std::is_trivially_destructible` is supported. +#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set +#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE +// +// Checks whether `std::is_trivially_default_constructible` and +// `std::is_trivially_copy_constructible` are supported. 
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE +#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set +#else +#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE +// +// Checks whether `std::is_trivially_copy_assignable` is supported. +#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE +#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set +#else +#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE +// +// Checks whether `std::is_trivially_copyable` is supported. +#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE +#error ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set +#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1 +#endif + +// ABSL_HAVE_THREAD_LOCAL +// +// Checks whether C++11's `thread_local` storage duration specifier is +// supported. +#ifdef ABSL_HAVE_THREAD_LOCAL +#error ABSL_HAVE_THREAD_LOCAL cannot be directly set +#elif defined(__APPLE__) +// Notes: +// * Xcode's clang did not support `thread_local` until version 8, and +// even then not for all iOS < 9.0. +// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator +// targeting iOS 9.x. +// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time +// making ABSL_HAVE_FEATURE unreliable there. +// +#if ABSL_HAVE_FEATURE(cxx_thread_local) && \ + !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) +#define ABSL_HAVE_THREAD_LOCAL 1 +#endif +#else // !defined(__APPLE__) +#define ABSL_HAVE_THREAD_LOCAL 1 +#endif + +// There are platforms for which TLS should not be used even though the compiler +// makes it seem like it's supported (Android NDK < r12b for example). +// This is primarily because of linker problems and toolchain misconfiguration: +// Abseil does not intend to support this indefinitely. 
Currently, the newest +// toolchain that we intend to support that requires this behavior is the +// r11 NDK - allowing for a 5 year support window on that means this option +// is likely to be removed around June of 2021. +// TLS isn't supported until NDK r12b per +// https://developer.android.com/ndk/downloads/revision_history.html +// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in +// . For NDK < r16, users should define these macros, +// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. +#if defined(__ANDROID__) && defined(__clang__) +#if __has_include() +#include +#endif // __has_include() +#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \ + defined(__NDK_MINOR__) && \ + ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1))) +#undef ABSL_HAVE_TLS +#undef ABSL_HAVE_THREAD_LOCAL +#endif +#endif // defined(__ANDROID__) && defined(__clang__) + +// ABSL_HAVE_INTRINSIC_INT128 +// +// Checks whether the __int128 compiler extension for a 128-bit integral type is +// supported. +// +// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is +// supported, but we avoid using it in certain cases: +// * On Clang: +// * Building using Clang for Windows, where the Clang runtime library has +// 128-bit support only on LP64 architectures, but Windows is LLP64. +// * On Nvidia's nvcc: +// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions +// actually support __int128. 
+#ifdef ABSL_HAVE_INTRINSIC_INT128 +#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set +#elif defined(__SIZEOF_INT128__) +#if (defined(__clang__) && !defined(_WIN32)) || \ + (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ + (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) +#define ABSL_HAVE_INTRINSIC_INT128 1 +#elif defined(__CUDACC__) +// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a +// string explaining that it has been removed starting with CUDA 9. We use +// nested #ifs because there is no short-circuiting in the preprocessor. +// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined. +#if __CUDACC_VER__ >= 70000 +#define ABSL_HAVE_INTRINSIC_INT128 1 +#endif // __CUDACC_VER__ >= 70000 +#endif // defined(__CUDACC__) +#endif // ABSL_HAVE_INTRINSIC_INT128 + +// ABSL_HAVE_EXCEPTIONS +// +// Checks whether the compiler both supports and enables exceptions. Many +// compilers support a "no exceptions" mode that disables exceptions. +// +// Generally, when ABSL_HAVE_EXCEPTIONS is not defined: +// +// * Code using `throw` and `try` may not compile. +// * The `noexcept` specifier will still compile and behave as normal. +// * The `noexcept` operator may still return `false`. +// +// For further details, consult the compiler's documentation. +#ifdef ABSL_HAVE_EXCEPTIONS +#error ABSL_HAVE_EXCEPTIONS cannot be directly set. +#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6) +// Clang >= 3.6 +#if ABSL_HAVE_FEATURE(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // ABSL_HAVE_FEATURE(cxx_exceptions) +#elif defined(__clang__) +// Clang < 3.6 +// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro +#if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) +// Handle remaining special cases and default to exceptions being supported. 
+#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \ + !defined(__cpp_exceptions)) && \ + !(defined(_MSC_VER) && !defined(_CPPUNWIND)) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif + +// ----------------------------------------------------------------------------- +// Platform Feature Checks +// ----------------------------------------------------------------------------- + +// Currently supported operating systems and associated preprocessor +// symbols: +// +// Linux and Linux-derived __linux__ +// Android __ANDROID__ (implies __linux__) +// Linux (non-Android) __linux__ && !__ANDROID__ +// Darwin (macOS and iOS) __APPLE__ +// Akaros (http://akaros.org) __ros__ +// Windows _WIN32 +// NaCL __native_client__ +// AsmJS __asmjs__ +// WebAssembly __wasm__ +// Fuchsia __Fuchsia__ +// +// Note that since Android defines both __ANDROID__ and __linux__, one +// may probe for either Linux or Android by simply testing for __linux__. + +// ABSL_HAVE_MMAP +// +// Checks whether the platform has an mmap(2) implementation as defined in +// POSIX.1-2001. +#ifdef ABSL_HAVE_MMAP +#error ABSL_HAVE_MMAP cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ + defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \ + defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ + defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ + defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__) +#define ABSL_HAVE_MMAP 1 +#endif + +// ABSL_HAVE_PTHREAD_GETSCHEDPARAM +// +// Checks whether the platform implements the pthread_(get|set)schedparam(3) +// functions as defined in POSIX.1-2001. 
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM +#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \ + defined(__NetBSD__) || defined(__VXWORKS__) +#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 +#endif + +// ABSL_HAVE_SCHED_GETCPU +// +// Checks whether sched_getcpu is available. +#ifdef ABSL_HAVE_SCHED_GETCPU +#error ABSL_HAVE_SCHED_GETCPU cannot be directly set +#elif defined(__linux__) +#define ABSL_HAVE_SCHED_GETCPU 1 +#endif + +// ABSL_HAVE_SCHED_YIELD +// +// Checks whether the platform implements sched_yield(2) as defined in +// POSIX.1-2001. +#ifdef ABSL_HAVE_SCHED_YIELD +#error ABSL_HAVE_SCHED_YIELD cannot be directly set +#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) || \ + defined(__VXWORKS__) +#define ABSL_HAVE_SCHED_YIELD 1 +#endif + +// ABSL_HAVE_SEMAPHORE_H +// +// Checks whether the platform supports the header and sem_init(3) +// family of functions as standardized in POSIX.1-2001. +// +// Note: While Apple provides for both iOS and macOS, it is +// explicitly deprecated and will cause build failures if enabled for those +// platforms. We side-step the issue by not defining it here for Apple +// platforms. +#ifdef ABSL_HAVE_SEMAPHORE_H +#error ABSL_HAVE_SEMAPHORE_H cannot be directly set +#elif defined(__linux__) || defined(__ros__) || defined(__VXWORKS__) +#define ABSL_HAVE_SEMAPHORE_H 1 +#endif + +// ABSL_HAVE_ALARM +// +// Checks whether the platform supports the header and alarm(2) +// function as standardized in POSIX.1-2001. 
+#ifdef ABSL_HAVE_ALARM +#error ABSL_HAVE_ALARM cannot be directly set +#elif defined(__GOOGLE_GRTE_VERSION__) +// feature tests for Google's GRTE +#define ABSL_HAVE_ALARM 1 +#elif defined(__GLIBC__) +// feature test for glibc +#define ABSL_HAVE_ALARM 1 +#elif defined(_MSC_VER) +// feature tests for Microsoft's library +#elif defined(__MINGW32__) +// mingw32 doesn't provide alarm(2): +// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h +// mingw-w64 provides a no-op implementation: +// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c +#elif defined(__EMSCRIPTEN__) +// emscripten doesn't support signals +#elif defined(__Fuchsia__) +// Signals don't exist on fuchsia. +#elif defined(__native_client__) +// Signals don't exist on hexagon/QuRT +#elif defined(__hexagon__) +#else +// other standard libraries +#define ABSL_HAVE_ALARM 1 +#endif + +// ABSL_IS_LITTLE_ENDIAN +// ABSL_IS_BIG_ENDIAN +// +// Checks the endianness of the platform. +// +// Notes: uses the built in endian macros provided by GCC (since 4.6) and +// Clang (since 3.2); see +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html. +// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error. +#if defined(ABSL_IS_BIG_ENDIAN) +#error "ABSL_IS_BIG_ENDIAN cannot be directly set." +#endif +#if defined(ABSL_IS_LITTLE_ENDIAN) +#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set." 
+#endif + +#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define ABSL_IS_LITTLE_ENDIAN 1 +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ + __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define ABSL_IS_BIG_ENDIAN 1 +#elif defined(_WIN32) +#define ABSL_IS_LITTLE_ENDIAN 1 +#else +#error "absl endian detection needs to be set up for your compiler" +#endif + +// macOS < 10.13 and iOS < 12 don't support , , or +// because the libc++ shared library shipped on the system doesn't have the +// requisite exported symbols. See +// https://github.com/abseil/abseil-cpp/issues/207 and +// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes +// +// libc++ spells out the availability requirements in the file +// llvm-project/libcxx/include/__config via the #define +// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. The set of versions has been +// modified a few times, via +// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953 +// and +// https://github.com/llvm/llvm-project/commit/0bc451e7e137c4ccadcd3377250874f641ca514a +// The second has the actually correct versions, thus, is what we copy here. +#if defined(__APPLE__) && \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)) +#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 +#else +#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 +#endif + +// ABSL_HAVE_STD_ANY +// +// Checks whether C++17 std::any is available. 
+#ifdef ABSL_HAVE_STD_ANY +#error "ABSL_HAVE_STD_ANY cannot be directly set." +#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define ABSL_HAVE_STD_ANY 1 +#endif + +// ABSL_HAVE_STD_OPTIONAL +// +// Checks whether C++17 std::optional is available. +#ifdef ABSL_HAVE_STD_OPTIONAL +#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set." +#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define ABSL_HAVE_STD_OPTIONAL 1 +#endif + +// ABSL_HAVE_STD_VARIANT +// +// Checks whether C++17 std::variant is available. +#ifdef ABSL_HAVE_STD_VARIANT +#error "ABSL_HAVE_STD_VARIANT cannot be directly set." +#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define ABSL_HAVE_STD_VARIANT 1 +#endif + +// ABSL_HAVE_STD_STRING_VIEW +// +// Checks whether C++17 std::string_view is available. +#ifdef ABSL_HAVE_STD_STRING_VIEW +#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set." +#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +#define ABSL_HAVE_STD_STRING_VIEW 1 +#endif + +// ABSL_USES_STD_ANY +// +// Indicates whether absl::any is an alias for std::any. +#if !defined(ABSL_OPTION_USE_STD_ANY) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_ANY == 0 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY)) +#undef ABSL_USES_STD_ANY +#elif ABSL_OPTION_USE_STD_ANY == 1 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY)) +#define ABSL_USES_STD_ANY 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_OPTIONAL +// +// Indicates whether absl::optional is an alias for std::optional. +#if !defined(ABSL_OPTION_USE_STD_OPTIONAL) +#error options.h is misconfigured. 
+#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL)) +#undef ABSL_USES_STD_OPTIONAL +#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL)) +#define ABSL_USES_STD_OPTIONAL 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_VARIANT +// +// Indicates whether absl::variant is an alias for std::variant. +#if !defined(ABSL_OPTION_USE_STD_VARIANT) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT)) +#undef ABSL_USES_STD_VARIANT +#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT)) +#define ABSL_USES_STD_VARIANT 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_STRING_VIEW +// +// Indicates whether absl::string_view is an alias for std::string_view. +#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + !defined(ABSL_HAVE_STD_STRING_VIEW)) +#undef ABSL_USES_STD_STRING_VIEW +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + defined(ABSL_HAVE_STD_STRING_VIEW)) +#define ABSL_USES_STD_STRING_VIEW 1 +#else +#error options.h is misconfigured. +#endif + +// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION +// SEH exception from emplace for variant when constructing the +// struct can throw. This defeats some of variant_test and +// variant_exception_safety_test. +#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) +#define ABSL_INTERNAL_MSVC_2017_DBG_MODE +#endif + +// ABSL_INTERNAL_MANGLED_NS +// ABSL_INTERNAL_MANGLED_BACKREFERENCE +// +// Internal macros for building up mangled names in our internal fork of CCTZ. 
+// This implementation detail is only needed and provided for the MSVC build. +// +// These macros both expand to string literals. ABSL_INTERNAL_MANGLED_NS is +// the mangled spelling of the `absl` namespace, and +// ABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing +// the proper count to skip past the CCTZ fork namespace names. (This number +// is one larger when there is an inline namespace name to skip.) +#if defined(_MSC_VER) +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_INTERNAL_MANGLED_NS "absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5" +#else +#define ABSL_INTERNAL_MANGLED_NS \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) \ + "@absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6" +#endif +#endif + +// ABSL_DLL +// +// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` +// so we can annotate symbols appropriately as being exported. When used in +// headers consuming a DLL, this macro expands to `__declspec(dllimport)` so +// that consumers know the symbol is defined inside the DLL. In all other cases, +// the macro expands to nothing. +#if defined(_MSC_VER) +#if defined(ABSL_BUILD_DLL) +#define ABSL_DLL __declspec(dllexport) +#elif defined(ABSL_CONSUME_DLL) +#define ABSL_DLL __declspec(dllimport) +#else +#define ABSL_DLL +#endif +#else +#define ABSL_DLL +#endif // defined(_MSC_VER) + +#if defined(_MSC_VER) +#if defined(ABSL_BUILD_TEST_DLL) +#define ABSL_TEST_DLL __declspec(dllexport) +#elif defined(ABSL_CONSUME_TEST_DLL) +#define ABSL_TEST_DLL __declspec(dllimport) +#else +#define ABSL_TEST_DLL +#endif +#else +#define ABSL_TEST_DLL +#endif // defined(_MSC_VER) + +// ABSL_HAVE_MEMORY_SANITIZER +// +// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of +// a compiler instrumentation module and a run-time library. +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set." 
+#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer) +#define ABSL_HAVE_MEMORY_SANITIZER 1 +#endif + +// ABSL_HAVE_THREAD_SANITIZER +// +// ThreadSanitizer (TSan) is a fast data race detector. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_THREAD__) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(thread_sanitizer) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#endif + +// ABSL_HAVE_ADDRESS_SANITIZER +// +// AddressSanitizer (ASan) is a fast memory error detector. +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_ADDRESS__) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(address_sanitizer) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_HWADDRESS_SANITIZER +// +// Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan +// memory error detector which can use CPU features like ARM TBI, Intel LAM or +// AMD UAI. +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER +#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_HWADDRESS__) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_DATAFLOW_SANITIZER +// +// Dataflow Sanitizer (or DFSAN) is a generalised dynamic data flow analysis. +#ifdef ABSL_HAVE_DATAFLOW_SANITIZER +#error "ABSL_HAVE_DATAFLOW_SANITIZER cannot be directly set." +#elif defined(DATAFLOW_SANITIZER) +// GCC provides no method for detecting the presence of the standalone +// DataFlowSanitizer (-fsanitize=dataflow), so GCC users of -fsanitize=dataflow +// should also use -DDATAFLOW_SANITIZER. +#define ABSL_HAVE_DATAFLOW_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(dataflow_sanitizer) +#define ABSL_HAVE_DATAFLOW_SANITIZER 1 +#endif + +// ABSL_HAVE_LEAK_SANITIZER +// +// LeakSanitizer (or lsan) is a detector of memory leaks. 
+// https://clang.llvm.org/docs/LeakSanitizer.html +// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// +// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time +// whether the LeakSanitizer is potentially available. However, just because the +// LeakSanitizer is available does not mean it is active. Use the +// always-available run-time interface in //absl/debugging/leak_check.h for +// interacting with LeakSanitizer. +#ifdef ABSL_HAVE_LEAK_SANITIZER +#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set." +#elif defined(LEAK_SANITIZER) +// GCC provides no method for detecting the presence of the standalone +// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also +// use -DLEAK_SANITIZER. +#define ABSL_HAVE_LEAK_SANITIZER 1 +// Clang standalone LeakSanitizer (-fsanitize=leak) +#elif ABSL_HAVE_FEATURE(leak_sanitizer) +#define ABSL_HAVE_LEAK_SANITIZER 1 +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) +// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer. +#define ABSL_HAVE_LEAK_SANITIZER 1 +#endif + +// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// +// Class template argument deduction is a language feature added in C++17. +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set." +#elif defined(__cpp_deduction_guides) +#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1 +#endif + +// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +// +// Prior to C++17, static constexpr variables defined in classes required a +// separate definition outside of the class body, for example: +// +// class Foo { +// static constexpr int kBar = 0; +// }; +// constexpr int Foo::kBar; +// +// In C++17, these variables defined in classes are considered inline variables, +// and the extra declaration is redundant. 
Since some compilers warn on the +// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used +// conditionally ignore them: +// +// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +// constexpr int Foo::kBar; +// #endif +#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L +#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1 +#endif + +// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with +// RTTI support. +#ifdef ABSL_INTERNAL_HAS_RTTI +#error ABSL_INTERNAL_HAS_RTTI cannot be directly set +#elif (defined(__GNUC__) && defined(__GXX_RTTI)) || \ + (defined(_MSC_VER) && defined(_CPPRTTI)) || \ + (!defined(__GNUC__) && !defined(_MSC_VER)) +#define ABSL_INTERNAL_HAS_RTTI 1 +#endif // !defined(__GNUC__) || defined(__GXX_RTTI) + +// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support. +// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. +#ifdef ABSL_INTERNAL_HAVE_SSE +#error ABSL_INTERNAL_HAVE_SSE cannot be directly set +#elif defined(__SSE__) +#define ABSL_INTERNAL_HAVE_SSE 1 +#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \ + !defined(_M_ARM64EC) +// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1 +// indicates that at least SSE was targeted with the /arch:SSE option. +// All x86-64 processors support SSE, so support can be assumed. +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +#define ABSL_INTERNAL_HAVE_SSE 1 +#endif + +// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support. +// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. 
+#ifdef ABSL_INTERNAL_HAVE_SSE2 +#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set +#elif defined(__SSE2__) +#define ABSL_INTERNAL_HAVE_SSE2 1 +#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \ + !defined(_M_ARM64EC) +// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2 +// indicates that at least SSE2 was targeted with the /arch:SSE2 option. +// All x86-64 processors support SSE2, so support can be assumed. +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +#define ABSL_INTERNAL_HAVE_SSE2 1 +#endif + +// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support. +// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. +// +// MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3 +// with MSVC requires either assuming that the code will only every run on CPUs +// that support SSSE3, otherwise __cpuid() can be used to detect support at +// runtime and fallback to a non-SSSE3 implementation when SSSE3 is unsupported +// by the CPU. +#ifdef ABSL_INTERNAL_HAVE_SSSE3 +#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set +#elif defined(__SSSE3__) +#define ABSL_INTERNAL_HAVE_SSSE3 1 +#endif + +// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM +// SIMD). +// +// If __CUDA_ARCH__ is defined, then we are compiling CUDA code in device mode. +// In device mode, NEON intrinsics are not available, regardless of host +// platform. +// https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code +#ifdef ABSL_INTERNAL_HAVE_ARM_NEON +#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set +#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__) +#define ABSL_INTERNAL_HAVE_ARM_NEON 1 +#endif + +// ABSL_HAVE_CONSTANT_EVALUATED is used for compile-time detection of +// constant evaluation support through `absl::is_constant_evaluated`. 
+#ifdef ABSL_HAVE_CONSTANT_EVALUATED +#error ABSL_HAVE_CONSTANT_EVALUATED cannot be directly set +#endif +#ifdef __cpp_lib_is_constant_evaluated +#define ABSL_HAVE_CONSTANT_EVALUATED 1 +#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated) +#define ABSL_HAVE_CONSTANT_EVALUATED 1 +#endif + +// ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros +// into an integer that can be compared against. +#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION +#error ABSL_INTERNAL_EMSCRIPTEN_VERSION cannot be directly set +#endif +#ifdef __EMSCRIPTEN__ +#include +#ifdef __EMSCRIPTEN_major__ +#if __EMSCRIPTEN_minor__ >= 1000 +#error __EMSCRIPTEN_minor__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION +#endif +#if __EMSCRIPTEN_tiny__ >= 1000 +#error __EMSCRIPTEN_tiny__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION +#endif +#define ABSL_INTERNAL_EMSCRIPTEN_VERSION \ + ((__EMSCRIPTEN_major__)*1000000 + (__EMSCRIPTEN_minor__)*1000 + \ + (__EMSCRIPTEN_tiny__)) +#endif +#endif + +#endif // ABSL_BASE_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/const_init.h b/CAPI/cpp/grpc/include/absl/base/const_init.h new file mode 100644 index 00000000..4a0ef137 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/const_init.h @@ -0,0 +1,78 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// kConstInit +// ----------------------------------------------------------------------------- +// +// A constructor tag used to mark an object as safe for use as a global +// variable, avoiding the usual lifetime issues that can affect globals. + +#ifndef ABSL_BASE_CONST_INIT_H_ +#define ABSL_BASE_CONST_INIT_H_ + +#include "absl/base/config.h" + +// In general, objects with static storage duration (such as global variables) +// can trigger tricky object lifetime situations. Attempting to access them +// from the constructors or destructors of other global objects can result in +// undefined behavior, unless their constructors and destructors are designed +// with this issue in mind. +// +// The normal way to deal with this issue in C++11 is to use constant +// initialization and trivial destructors. +// +// Constant initialization is guaranteed to occur before any other code +// executes. Constructors that are declared 'constexpr' are eligible for +// constant initialization. You can annotate a variable declaration with the +// ABSL_CONST_INIT macro to express this intent. For compilers that support +// it, this annotation will cause a compilation error for declarations that +// aren't subject to constant initialization (perhaps because a runtime value +// was passed as a constructor argument). +// +// On program shutdown, lifetime issues can be avoided on global objects by +// ensuring that they contain trivial destructors. A class has a trivial +// destructor unless it has a user-defined destructor, a virtual method or base +// class, or a data member or base class with a non-trivial destructor of its +// own. Objects with static storage duration and a trivial destructor are not +// cleaned up on program shutdown, and are thus safe to access from other code +// running during shutdown. 
+// +// For a few core Abseil classes, we make a best effort to allow for safe global +// instances, even though these classes have non-trivial destructors. These +// objects can be created with the absl::kConstInit tag. For example: +// ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit); +// +// The line above declares a global variable of type absl::Mutex which can be +// accessed at any point during startup or shutdown. global_mutex's destructor +// will still run, but will not invalidate the object. Note that C++ specifies +// that accessing an object after its destructor has run results in undefined +// behavior, but this pattern works on the toolchains we support. +// +// The absl::kConstInit tag should only be used to define objects with static +// or thread_local storage duration. + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + enum ConstInitType + { + kConstInit, + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CONST_INIT_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h b/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h new file mode 100644 index 00000000..57941f63 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h @@ -0,0 +1,515 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines dynamic annotations for use with dynamic analysis tool +// such as valgrind, PIN, etc. 
+// +// Dynamic annotation is a source code annotation that affects the generated +// code (that is, the annotation is not a comment). Each such annotation is +// attached to a particular instruction and/or to a particular object (address) +// in the program. +// +// The annotations that should be used by users are macros in all upper-case +// (e.g., ABSL_ANNOTATE_THREAD_NAME). +// +// Actual implementation of these macros may differ depending on the dynamic +// analysis tool being used. +// +// This file supports the following configurations: +// - Dynamic Annotations enabled (with static thread-safety warnings disabled). +// In this case, macros expand to functions implemented by Thread Sanitizer, +// when building with TSan. When not provided an external implementation, +// dynamic_annotations.cc provides no-op implementations. +// +// - Static Clang thread-safety warnings enabled. +// When building with a Clang compiler that supports thread-safety warnings, +// a subset of annotations can be statically-checked at compile-time. We +// expand these macros to static-inline functions that can be analyzed for +// thread-safety, but afterwards elided when building the final binary. +// +// - All annotations are disabled. +// If neither Dynamic Annotations nor Clang thread-safety warnings are +// enabled, then all annotation-macros expand to empty. + +#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ +#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#ifdef __cplusplus +#include "absl/base/macros.h" +#endif + +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER +#include +#endif + +// TODO(rogeeff): Remove after the backward compatibility period. +#include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export + +// ------------------------------------------------------------------------- +// Decide which features are enabled. 
+ +#ifdef ABSL_HAVE_THREAD_SANITIZER + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 + +#else + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 + +// Clang provides limited support for static thread-safety analysis through a +// feature called Annotalysis. We configure macro-definitions according to +// whether Annotalysis support is available. When running in opt-mode, GCC +// will issue a warning, if these attributes are compiled. Only include them +// when compiling using Clang. + +#if defined(__clang__) +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1 +#if !defined(SWIG) +#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 +#endif +#else +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#endif + +// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED + +#endif // ABSL_HAVE_THREAD_SANITIZER + +#ifdef __cplusplus +#define ABSL_INTERNAL_BEGIN_EXTERN_C \ + extern "C" \ + { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline +#endif + +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 +// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are +// defined by the compiler-based sanitizer implementation, not by the Abseil +// library. 
Therefore they do not use ABSL_INTERNAL_C_SYMBOL. + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC. +#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ABSL_ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName) \ + (__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. 
+ +// Report that a lock has been created at address `lock`. +#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate) \ + (__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. +#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy) \ + (__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. +#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace \ + { \ + class static_var##_annotator \ + { \ + public: \ + static_var##_annotator() \ + { \ + ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. 
+ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateRWLockCreate(const char* file, int line, const volatile void* lock); +void AnnotateRWLockCreateStatic(const char* file, int line, const volatile void* lock); +void AnnotateRWLockDestroy(const char* file, int line, const volatile void* lock); +void AnnotateRWLockAcquired(const char* file, int line, const volatile void* lock, long is_w); // NOLINT +void AnnotateRWLockReleased(const char* file, int line, const volatile void* lock, long is_w); // NOLINT +void AnnotateBenignRace(const char* file, int line, const volatile void* address, const char* description); +void AnnotateBenignRaceSized(const char* file, int line, const volatile void* address, size_t size, const char* description); +void AnnotateThreadName(const char* file, int line, const char* name); +void AnnotateEnableRaceDetection(const char* file, int line, int enable); +ABSL_INTERNAL_END_EXTERN_C + +#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 + +#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) // empty +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty +#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) // empty +#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty +#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty +#define ABSL_ANNOTATE_BENIGN_RACE(address, description) // empty +#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty +#define ABSL_ANNOTATE_THREAD_NAME(name) // empty +#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty +#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty + +#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define memory annotations. 
+ +#ifdef ABSL_HAVE_MEMORY_SANITIZER + +#include + +#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison(address, size) + +#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + __msan_allocated_memory(address, size) + +#else // !defined(ABSL_HAVE_MEMORY_SANITIZER) + +// TODO(rogeeff): remove this branch +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) +#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) +#else + +#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty +#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty + +#endif + +#endif // ABSL_HAVE_MEMORY_SANITIZER + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END attributes. + +#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ + __attribute((exclusive_lock_function("*"))) +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ + __attribute((unlock_function("*"))) + +#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty + +#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END annotations. + +#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 +// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are +// defined by the compiler-based implementation, not by the Abseil +// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. + +// Request the analysis tool to ignore all reads in the current thread until +// ABSL_ANNOTATE_IGNORE_READS_END is called. 
Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ABSL_ANNOTATE_UNPROTECTED_READ. +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ + (__FILE__, __LINE__) + +// Stop ignoring reads. +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreReadsBegin(const char* file, int line) + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE; +void AnnotateIgnoreReadsEnd(const char* file, int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE; +ABSL_INTERNAL_END_EXTERN_C + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. 
+ +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin) \ + ) \ + () + +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd) \ + ) \ + () + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsBegin +)() + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE +{ +} + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsEnd +)() + ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE +{ +} + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin) \ + (__FILE__, __LINE__) + +// Stop ignoring writes. +#define ABSL_ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd) \ + (__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreWritesBegin(const char* file, int line); +void AnnotateIgnoreWritesEnd(const char* file, int line); +ABSL_INTERNAL_END_EXTERN_C + +#else + +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ABSL_ANNOTATE_IGNORE_READS_BEGIN(); +// ... 
= x; +// ABSL_ANNOTATE_IGNORE_READS_END(); +// one can use +// ... = ABSL_ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do \ + { \ + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ + ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do \ + { \ + ABSL_ANNOTATE_IGNORE_WRITES_END(); \ + ABSL_ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + template + inline T AnnotateUnprotectedRead(const volatile T& x) + { // NOLINT + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); + T res = x; + ABSL_ANNOTATE_IGNORE_READS_END(); + return res; + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl +#endif + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. 
+#include + +#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ + struct \ + { \ + alignas(8) char x[8]; \ + } name + +#else + +#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty +#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// HWAddress sanitizer annotations + +#ifdef __cplusplus +namespace absl +{ +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER + // Under HWASAN changes the tag of the pointer. + template + T* HwasanTagPointer(T* ptr, uintptr_t tag) + { + return reinterpret_cast(__hwasan_tag_pointer(ptr, tag)); + } +#else + template + T* HwasanTagPointer(T* ptr, uintptr_t) + { + return ptr; + } +#endif +} // namespace absl +#endif + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. + +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h new file mode 100644 index 00000000..8cef0e14 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h @@ -0,0 +1,225 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +#if defined(_MSC_VER) && !defined(__clang__) +#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0 +#else +#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1 +#endif + +#if defined(_MSC_VER) +#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0 +#else +#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1 +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + template + class AtomicHook; + +// To workaround AtomicHook not being constant-initializable on some platforms, +// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES` +// instead of `ABSL_CONST_INIT`. +#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT +#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT +#else +#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES +#endif + + // `AtomicHook` is a helper class, templatized on a raw function pointer type, + // for implementing Abseil customization hooks. It is a callable object that + // dispatches to the registered hook. Objects of type `AtomicHook` must have + // static or thread storage duration. + // + // A default constructed object performs a no-op (and returns a default + // constructed object) if no hook has been registered. 
+ // + // Hooks can be pre-registered via constant initialization, for example: + // + // ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook + // my_hook(DefaultAction); + // + // and then changed at runtime via a call to `Store()`. + // + // Reads and writes guarantee memory_order_acquire/memory_order_release + // semantics. + template + class AtomicHook + { + public: + using FnPtr = ReturnType (*)(Args...); + + // Constructs an object that by default performs a no-op (and + // returns a default constructed object) when no hook as been registered. + constexpr AtomicHook() : + AtomicHook(DummyFunction) + { + } + + // Constructs an object that by default dispatches to/returns the + // pre-registered default_fn when no hook has been registered at runtime. +#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + explicit constexpr AtomicHook(FnPtr default_fn) : + hook_(default_fn), + default_fn_(default_fn) + { + } +#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + explicit constexpr AtomicHook(FnPtr default_fn) : + hook_(kUninitialized), + default_fn_(default_fn) + { + } +#else + // As of January 2020, on all known versions of MSVC this constructor runs in + // the global constructor sequence. If `Store()` is called by a dynamic + // initializer, we want to preserve the value, even if this constructor runs + // after the call to `Store()`. If not, `hook_` will be + // zero-initialized by the linker and we have no need to set it. + // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html + explicit constexpr AtomicHook(FnPtr default_fn) : + /* hook_(deliberately omitted), */ default_fn_(default_fn) + { + static_assert(kUninitialized == 0, "here we rely on zero-initialization"); + } +#endif + + // Stores the provided function pointer as the value for this hook. + // + // This is intended to be called once. 
Multiple calls are legal only if the + // same function pointer is provided for each call. The store is implemented + // as a memory_order_release operation, and read accesses are implemented as + // memory_order_acquire. + void Store(FnPtr fn) + { + bool success = DoStore(fn); + static_cast(success); + assert(success); + } + + // Invokes the registered callback. If no callback has yet been registered, a + // default-constructed object of the appropriate type is returned instead. + template + ReturnType operator()(CallArgs&&... args) const + { + return DoLoad()(std::forward(args)...); + } + + // Returns the registered callback, or nullptr if none has been registered. + // Useful if client code needs to conditionalize behavior based on whether a + // callback was registered. + // + // Note that atomic_hook.Load()() and atomic_hook() have different semantics: + // operator()() will perform a no-op if no callback was registered, while + // Load()() will dereference a null function pointer. Prefer operator()() to + // Load()() unless you must conditionalize behavior on whether a hook was + // registered. + FnPtr Load() const + { + FnPtr ptr = DoLoad(); + return (ptr == DummyFunction) ? nullptr : ptr; + } + + private: + static ReturnType DummyFunction(Args...) + { + return ReturnType(); + } + + // Current versions of MSVC (as of September 2017) have a broken + // implementation of std::atomic: Its constructor attempts to do the + // equivalent of a reinterpret_cast in a constexpr context, which is not + // allowed. + // + // This causes an issue when building with LLVM under Windows. To avoid this, + // we use a less-efficient, intptr_t-based implementation on Windows. +#if ABSL_HAVE_WORKING_ATOMIC_POINTER + // Return the stored value, or DummyFunction if no value has been stored. + FnPtr DoLoad() const + { + return hook_.load(std::memory_order_acquire); + } + + // Store the given value. Returns false if a different value was already + // stored to this object. 
+ bool DoStore(FnPtr fn) + { + assert(fn); + FnPtr expected = default_fn_; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, fn, std::memory_order_acq_rel, std::memory_order_acquire + ); + const bool same_value_already_stored = (expected == fn); + return store_succeeded || same_value_already_stored; + } + + std::atomic hook_; +#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER + // Use a sentinel value unlikely to be the address of an actual function. + static constexpr intptr_t kUninitialized = 0; + + static_assert(sizeof(intptr_t) >= sizeof(FnPtr), "intptr_t can't contain a function pointer"); + + FnPtr DoLoad() const + { + const intptr_t value = hook_.load(std::memory_order_acquire); + if (value == kUninitialized) + { + return default_fn_; + } + return reinterpret_cast(value); + } + + bool DoStore(FnPtr fn) + { + assert(fn); + const auto value = reinterpret_cast(fn); + intptr_t expected = kUninitialized; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, value, std::memory_order_acq_rel, std::memory_order_acquire + ); + const bool same_value_already_stored = (expected == value); + return store_succeeded || same_value_already_stored; + } + + std::atomic hook_; +#endif + + const FnPtr default_fn_; + }; + +#undef ABSL_HAVE_WORKING_ATOMIC_POINTER +#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h new file mode 100644 index 00000000..d7c4cf3f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h @@ -0,0 +1,36 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ + +#include "absl/base/internal/atomic_hook.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace atomic_hook_internal + { + + using VoidF = void (*)(); + extern absl::base_internal::AtomicHook func; + extern int default_func_calls; + void DefaultFunc(); + void RegisterFunc(VoidF func); + + } // namespace atomic_hook_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h new file mode 100644 index 00000000..35314e2d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h @@ -0,0 +1,153 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// ----------------------------------------------------------------------------- +// File: cycleclock.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `CycleClock`, which yields the value and frequency +// of a cycle counter that increments at a rate that is approximately constant. +// +// NOTE: +// +// The cycle counter frequency is not necessarily related to the core clock +// frequency and should not be treated as such. That is, `CycleClock` cycles are +// not necessarily "CPU cycles" and code should not rely on that behavior, even +// if experimentally observed. +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/cycleclock_config.h" +#include "absl/base/internal/unscaledcycleclock.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + using CycleClockSourceFunc = int64_t (*)(); + + // ----------------------------------------------------------------------------- + // CycleClock + // ----------------------------------------------------------------------------- + class CycleClock + { + public: + // CycleClock::Now() + // + // Returns the value of a cycle counter that counts at a rate that is + // approximately constant. 
+ static int64_t Now(); + + // CycleClock::Frequency() + // + // Returns the amount by which `CycleClock::Now()` increases per second. Note + // that this value may not necessarily match the core CPU clock frequency. + static double Frequency(); + + private: +#if ABSL_USE_UNSCALED_CYCLECLOCK + static CycleClockSourceFunc LoadCycleClockSource(); + + static constexpr int32_t kShift = kCycleClockShift; + static constexpr double kFrequencyScale = kCycleClockFrequencyScale; + + ABSL_CONST_INIT static std::atomic cycle_clock_source_; +#endif // ABSL_USE_UNSCALED_CYCLECLOC + + CycleClock() = delete; // no instances + CycleClock(const CycleClock&) = delete; + CycleClock& operator=(const CycleClock&) = delete; + + friend class CycleClockSource; + }; + + class CycleClockSource + { + private: + // CycleClockSource::Register() + // + // Register a function that provides an alternate source for the unscaled CPU + // cycle count value. The source function must be async signal safe, must not + // call CycleClock::Now(), and must have a frequency that matches that of the + // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use + // the default source. + static void Register(CycleClockSourceFunc source); + }; + +#if ABSL_USE_UNSCALED_CYCLECLOCK + + inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() + { +#if !defined(__x86_64__) + // Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. + if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) + { + return nullptr; + } +#endif // !defined(__x86_64__) + + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback + // is invoked. 
+ return cycle_clock_source_.load(std::memory_order_acquire); + } + +// Accessing globals in inlined code in Window DLLs is problematic. +#ifndef _WIN32 + inline int64_t CycleClock::Now() + { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) + { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; + } +#endif + + inline double CycleClock::Frequency() + { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); + } + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/cycleclock_config.h b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock_config.h new file mode 100644 index 00000000..e6223bde --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock_config.h @@ -0,0 +1,56 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/unscaledcycleclock_config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + +#if ABSL_USE_UNSCALED_CYCLECLOCK +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY + // Not debug mode and the UnscaledCycleClock frequency is the CPU + // frequency. Scale the CycleClock to prevent overflow if someone + // tries to represent the time as cycles since the Unix epoch. + ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1); +#else + // Not debug mode and the UnscaledCycleClock isn't operating at the + // raw CPU frequency. There is no need to do any scaling, so don't + // needlessly sacrifice precision. + ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0); +#endif +#else // NDEBUG + // In debug mode use a different shift to discourage depending on a + // particular shift value. + ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2); +#endif // NDEBUG + + ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale, 1.0 / (1 << kCycleClockShift)); +#endif // ABSL_USE_UNSCALED_CYCLECLOC + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h b/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h new file mode 100644 index 00000000..2d5007ae --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h @@ -0,0 +1,178 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for directly invoking mmap() via syscall, avoiding the case where +// mmap() has been locally overridden. + +#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ +#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_MMAP + +#include + +#ifdef __linux__ + +#include +#ifdef __BIONIC__ +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + +#ifdef __mips__ +// Include definitions of the ABI currently in use. +#if defined(__BIONIC__) || !defined(__GLIBC__) +// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the +// definitions we need. +#include +#else +#include +#endif // __BIONIC__ || !__GLIBC__ +#endif // __mips__ + +// SYS_mmap and SYS_munmap are not defined in Android. 
+#ifdef __BIONIC__ +extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); +#if defined(__NR_mmap) && !defined(SYS_mmap) +#define SYS_mmap __NR_mmap +#endif +#ifndef SYS_munmap +#define SYS_munmap __NR_munmap +#endif +#endif // __BIONIC__ + +#if defined(__NR_mmap2) && !defined(SYS_mmap2) +#define SYS_mmap2 __NR_mmap2 +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Platform specific logic extracted from + // https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h + inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off_t offset) noexcept + { +#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + defined(__m68k__) || defined(__sh__) || \ + (defined(__hppa__) && !defined(__LP64__)) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ + (defined(__PPC__) && !defined(__PPC64__)) || \ + (defined(__riscv) && __riscv_xlen == 32) || \ + (defined(__s390__) && !defined(__s390x__)) || \ + (defined(__sparc__) && !defined(__arch64__)) + // On these architectures, implement mmap with mmap2. + static int pagesize = 0; + if (pagesize == 0) + { +#if defined(__wasm__) || defined(__asmjs__) + pagesize = getpagesize(); +#else + pagesize = sysconf(_SC_PAGESIZE); +#endif + } + if (offset < 0 || offset % pagesize != 0) + { + errno = EINVAL; + return MAP_FAILED; + } +#ifdef __BIONIC__ + // SYS_mmap2 has problems on Android API level <= 16. + // Workaround by invoking __mmap2() instead. + return __mmap2(start, length, prot, flags, fd, static_cast(offset / pagesize)); +#else + return reinterpret_cast( + syscall(SYS_mmap2, start, length, prot, flags, fd, + static_cast(offset / pagesize)) + ); // NOLINT +#endif +#elif defined(__s390x__) + // On s390x, mmap() arguments are passed in memory. 
+ unsigned long buf[6] = {reinterpret_cast(start), // NOLINT + static_cast(length), // NOLINT + static_cast(prot), // NOLINT + static_cast(flags), // NOLINT + static_cast(fd), // NOLINT + static_cast(offset)}; // NOLINT + return reinterpret_cast(syscall(SYS_mmap, buf)); +#elif defined(__x86_64__) +// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. +// We need to explicitly cast to an unsigned 64 bit type to avoid implicit +// sign extension. We can't cast pointers directly because those are +// 32 bits, and gcc will dump ugly warnings about casting from a pointer +// to an integer of a different size. We also need to make sure __off64_t +// isn't truncated to 32-bits under x32. +#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x)) + return reinterpret_cast( + syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), MMAP_SYSCALL_ARG(fd), static_cast(offset)) + ); +#undef MMAP_SYSCALL_ARG +#else // Remaining 64-bit aritectures. + static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit"); + return reinterpret_cast( + syscall(SYS_mmap, start, length, prot, flags, fd, offset) + ); +#endif + } + + inline int DirectMunmap(void* start, size_t length) + { + return static_cast(syscall(SYS_munmap, start, length)); + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#else // !__linux__ + +// For non-linux platforms where we have mmap, just dispatch directly to the +// actual mmap()/munmap() methods. 
+ +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off_t offset) + { + return mmap(start, length, prot, flags, fd, offset); + } + + inline int DirectMunmap(void* start, size_t length) + { + return munmap(start, length); + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // __linux__ + +#endif // ABSL_HAVE_MMAP + +#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h b/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h new file mode 100644 index 00000000..4fb5e139 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h @@ -0,0 +1,416 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines dynamic annotations for use with dynamic analysis tool +// such as valgrind, PIN, etc. +// +// Dynamic annotation is a source code annotation that affects the generated +// code (that is, the annotation is not a comment). Each such annotation is +// attached to a particular instruction and/or to a particular object (address) +// in the program. +// +// The annotations that should be used by users are macros in all upper-case +// (e.g., ANNOTATE_THREAD_NAME). +// +// Actual implementation of these macros may differ depending on the dynamic +// analysis tool being used. 
+// +// This file supports the following configurations: +// - Dynamic Annotations enabled (with static thread-safety warnings disabled). +// In this case, macros expand to functions implemented by Thread Sanitizer, +// when building with TSan. When not provided an external implementation, +// dynamic_annotations.cc provides no-op implementations. +// +// - Static Clang thread-safety warnings enabled. +// When building with a Clang compiler that supports thread-safety warnings, +// a subset of annotations can be statically-checked at compile-time. We +// expand these macros to static-inline functions that can be analyzed for +// thread-safety, but afterwards elided when building the final binary. +// +// - All annotations are disabled. +// If neither Dynamic Annotations nor Clang thread-safety warnings are +// enabled, then all annotation-macros expand to empty. + +#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ +#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ + +#include + +#include "absl/base/config.h" + +// ------------------------------------------------------------------------- +// Decide which features are enabled + +#ifndef DYNAMIC_ANNOTATIONS_ENABLED +#define DYNAMIC_ANNOTATIONS_ENABLED 0 +#endif + +#if defined(__clang__) && !defined(SWIG) +#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 + +#else + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 + +// Clang provides limited support for static thread-safety analysis through a +// feature called Annotalysis. 
We configure macro-definitions according to +// whether Annotalysis support is available. When running in opt-mode, GCC +// will issue a warning, if these attributes are compiled. Only include them +// when compiling using Clang. + +// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ + defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) +// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#endif + +// Memory annotations are also made available to LLVM's Memory Sanitizer +#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__) +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 +#endif + +#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0 +#endif + +#ifdef __cplusplus +#define ABSL_INTERNAL_BEGIN_EXTERN_C \ + extern "C" \ + { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline +#endif + +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. 
See also ANNOTATE_BENIGN_RACE_STATIC. +#define ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName) \ + (__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate) \ + (__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. 
+#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. +#define ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy) \ + (__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. 
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace \ + { \ + class static_var##_annotator \ + { \ + public: \ + static_var##_annotator() \ + { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 + +#define ANNOTATE_RWLOCK_CREATE(lock) // empty +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty +#define ANNOTATE_RWLOCK_DESTROY(lock) // empty +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty +#define ANNOTATE_BENIGN_RACE(address, description) // empty +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty +#define ANNOTATE_THREAD_NAME(name) // empty +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty + +#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define memory annotations. 
+ +#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1 + +#include + +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison(address, size) + +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + __msan_allocated_memory(address, size) + +#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 + +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) +#else +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty +#endif + +#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END attributes. + +#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ + __attribute((exclusive_lock_function("*"))) +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ + __attribute((unlock_function("*"))) + +#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty + +#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END annotations. + +#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 + +// Request the analysis tool to ignore all reads in the current thread until +// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ANNOTATE_UNPROTECTED_READ. 
+#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ + (__FILE__, __LINE__) + +// Stop ignoring reads. +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin) \ + () + +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd) \ + () + +#else + +#define ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin) \ + (__FILE__, __LINE__) + +// Stop ignoring writes. +#define ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd) \ + (__FILE__, __LINE__) + +#else + +#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ANNOTATE_IGNORE_READS_BEGIN(); +// ... 
= x; +// ANNOTATE_IGNORE_READS_END(); +// one can use +// ... = ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do \ + { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do \ + { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +#endif + +#else + +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. +#include + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct \ + { \ + char x[8] __attribute__((aligned(8))); \ + } name + +#else + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. 
+ +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/endian.h b/CAPI/cpp/grpc/include/absl/base/internal/endian.h new file mode 100644 index 00000000..4079f84a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/endian.h @@ -0,0 +1,472 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ +#define ABSL_BASE_INTERNAL_ENDIAN_H_ + +#include +#include + +#include "absl/base/casts.h" +#include "absl/base/config.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/port.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + inline uint64_t gbswap_64(uint64_t host_int) + { +#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) + return __builtin_bswap64(host_int); +#elif defined(_MSC_VER) + return _byteswap_uint64(host_int); +#else + return (((host_int & uint64_t{0xFF}) << 56) | ((host_int & uint64_t{0xFF00}) << 40) | ((host_int & uint64_t{0xFF0000}) << 24) | ((host_int & uint64_t{0xFF000000}) << 8) | ((host_int & uint64_t{0xFF00000000}) >> 8) | ((host_int & uint64_t{0xFF0000000000}) >> 24) | ((host_int & uint64_t{0xFF000000000000}) >> 40) | ((host_int & uint64_t{0xFF00000000000000}) >> 56)); +#endif + } + + inline uint32_t gbswap_32(uint32_t host_int) + { +#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) + return __builtin_bswap32(host_int); +#elif defined(_MSC_VER) + return _byteswap_ulong(host_int); +#else + return (((host_int & uint32_t{0xFF}) << 24) | ((host_int & uint32_t{0xFF00}) << 8) | ((host_int & uint32_t{0xFF0000}) >> 8) | ((host_int & uint32_t{0xFF000000}) >> 24)); +#endif + } + + inline uint16_t gbswap_16(uint16_t host_int) + { +#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) + return __builtin_bswap16(host_int); +#elif defined(_MSC_VER) + return _byteswap_ushort(host_int); +#else + return (((host_int & uint16_t{0xFF}) << 8) | ((host_int & uint16_t{0xFF00}) >> 8)); +#endif + } + +#ifdef ABSL_IS_LITTLE_ENDIAN + + // Portable definitions for htonl (host-to-network) and friends on little-endian + // architectures. 
+ inline uint16_t ghtons(uint16_t x) + { + return gbswap_16(x); + } + inline uint32_t ghtonl(uint32_t x) + { + return gbswap_32(x); + } + inline uint64_t ghtonll(uint64_t x) + { + return gbswap_64(x); + } + +#elif defined ABSL_IS_BIG_ENDIAN + + // Portable definitions for htonl (host-to-network) etc on big-endian + // architectures. These definitions are simpler since the host byte order is the + // same as network byte order. + inline uint16_t ghtons(uint16_t x) + { + return x; + } + inline uint32_t ghtonl(uint32_t x) + { + return x; + } + inline uint64_t ghtonll(uint64_t x) + { + return x; + } + +#else +#error \ + "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ + "ABSL_IS_LITTLE_ENDIAN must be defined" +#endif // byte order + + inline uint16_t gntohs(uint16_t x) + { + return ghtons(x); + } + inline uint32_t gntohl(uint32_t x) + { + return ghtonl(x); + } + inline uint64_t gntohll(uint64_t x) + { + return ghtonll(x); + } + + // Utilities to convert numbers between the current hosts's native byte + // order and little-endian byte order + // + // Load/Store methods are alignment safe + namespace little_endian + { +// Conversion functions. 
+#ifdef ABSL_IS_LITTLE_ENDIAN + + inline uint16_t FromHost16(uint16_t x) + { + return x; + } + inline uint16_t ToHost16(uint16_t x) + { + return x; + } + + inline uint32_t FromHost32(uint32_t x) + { + return x; + } + inline uint32_t ToHost32(uint32_t x) + { + return x; + } + + inline uint64_t FromHost64(uint64_t x) + { + return x; + } + inline uint64_t ToHost64(uint64_t x) + { + return x; + } + + inline constexpr bool IsLittleEndian() + { + return true; + } + +#elif defined ABSL_IS_BIG_ENDIAN + + inline uint16_t FromHost16(uint16_t x) + { + return gbswap_16(x); + } + inline uint16_t ToHost16(uint16_t x) + { + return gbswap_16(x); + } + + inline uint32_t FromHost32(uint32_t x) + { + return gbswap_32(x); + } + inline uint32_t ToHost32(uint32_t x) + { + return gbswap_32(x); + } + + inline uint64_t FromHost64(uint64_t x) + { + return gbswap_64(x); + } + inline uint64_t ToHost64(uint64_t x) + { + return gbswap_64(x); + } + + inline constexpr bool IsLittleEndian() + { + return false; + } + +#endif /* ENDIAN */ + + inline uint8_t FromHost(uint8_t x) + { + return x; + } + inline uint16_t FromHost(uint16_t x) + { + return FromHost16(x); + } + inline uint32_t FromHost(uint32_t x) + { + return FromHost32(x); + } + inline uint64_t FromHost(uint64_t x) + { + return FromHost64(x); + } + inline uint8_t ToHost(uint8_t x) + { + return x; + } + inline uint16_t ToHost(uint16_t x) + { + return ToHost16(x); + } + inline uint32_t ToHost(uint32_t x) + { + return ToHost32(x); + } + inline uint64_t ToHost(uint64_t x) + { + return ToHost64(x); + } + + inline int8_t FromHost(int8_t x) + { + return x; + } + inline int16_t FromHost(int16_t x) + { + return bit_cast(FromHost16(bit_cast(x))); + } + inline int32_t FromHost(int32_t x) + { + return bit_cast(FromHost32(bit_cast(x))); + } + inline int64_t FromHost(int64_t x) + { + return bit_cast(FromHost64(bit_cast(x))); + } + inline int8_t ToHost(int8_t x) + { + return x; + } + inline int16_t ToHost(int16_t x) + { + return 
bit_cast(ToHost16(bit_cast(x))); + } + inline int32_t ToHost(int32_t x) + { + return bit_cast(ToHost32(bit_cast(x))); + } + inline int64_t ToHost(int64_t x) + { + return bit_cast(ToHost64(bit_cast(x))); + } + + // Functions to do unaligned loads and stores in little-endian order. + inline uint16_t Load16(const void* p) + { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); + } + + inline void Store16(void* p, uint16_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); + } + + inline uint32_t Load32(const void* p) + { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); + } + + inline void Store32(void* p, uint32_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); + } + + inline uint64_t Load64(const void* p) + { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); + } + + inline void Store64(void* p, uint64_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); + } + + } // namespace little_endian + + // Utilities to convert numbers between the current hosts's native byte + // order and big-endian byte order (same as network byte order) + // + // Load/Store methods are alignment safe + namespace big_endian + { +#ifdef ABSL_IS_LITTLE_ENDIAN + + inline uint16_t FromHost16(uint16_t x) + { + return gbswap_16(x); + } + inline uint16_t ToHost16(uint16_t x) + { + return gbswap_16(x); + } + + inline uint32_t FromHost32(uint32_t x) + { + return gbswap_32(x); + } + inline uint32_t ToHost32(uint32_t x) + { + return gbswap_32(x); + } + + inline uint64_t FromHost64(uint64_t x) + { + return gbswap_64(x); + } + inline uint64_t ToHost64(uint64_t x) + { + return gbswap_64(x); + } + + inline constexpr bool IsLittleEndian() + { + return true; + } + +#elif defined ABSL_IS_BIG_ENDIAN + + inline uint16_t FromHost16(uint16_t x) + { + return x; + } + inline uint16_t ToHost16(uint16_t x) + { + return x; + } + + inline uint32_t FromHost32(uint32_t x) + { + return x; + } + inline uint32_t ToHost32(uint32_t x) + { + return x; + } + + inline uint64_t 
FromHost64(uint64_t x) + { + return x; + } + inline uint64_t ToHost64(uint64_t x) + { + return x; + } + + inline constexpr bool IsLittleEndian() + { + return false; + } + +#endif /* ENDIAN */ + + inline uint8_t FromHost(uint8_t x) + { + return x; + } + inline uint16_t FromHost(uint16_t x) + { + return FromHost16(x); + } + inline uint32_t FromHost(uint32_t x) + { + return FromHost32(x); + } + inline uint64_t FromHost(uint64_t x) + { + return FromHost64(x); + } + inline uint8_t ToHost(uint8_t x) + { + return x; + } + inline uint16_t ToHost(uint16_t x) + { + return ToHost16(x); + } + inline uint32_t ToHost(uint32_t x) + { + return ToHost32(x); + } + inline uint64_t ToHost(uint64_t x) + { + return ToHost64(x); + } + + inline int8_t FromHost(int8_t x) + { + return x; + } + inline int16_t FromHost(int16_t x) + { + return bit_cast(FromHost16(bit_cast(x))); + } + inline int32_t FromHost(int32_t x) + { + return bit_cast(FromHost32(bit_cast(x))); + } + inline int64_t FromHost(int64_t x) + { + return bit_cast(FromHost64(bit_cast(x))); + } + inline int8_t ToHost(int8_t x) + { + return x; + } + inline int16_t ToHost(int16_t x) + { + return bit_cast(ToHost16(bit_cast(x))); + } + inline int32_t ToHost(int32_t x) + { + return bit_cast(ToHost32(bit_cast(x))); + } + inline int64_t ToHost(int64_t x) + { + return bit_cast(ToHost64(bit_cast(x))); + } + + // Functions to do unaligned loads and stores in big-endian order. 
+ inline uint16_t Load16(const void* p) + { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); + } + + inline void Store16(void* p, uint16_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); + } + + inline uint32_t Load32(const void* p) + { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); + } + + inline void Store32(void* p, uint32_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); + } + + inline uint64_t Load64(const void* p) + { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); + } + + inline void Store64(void* p, uint64_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); + } + + } // namespace big_endian + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h b/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h new file mode 100644 index 00000000..4140befe --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h @@ -0,0 +1,55 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ +#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // `ErrnoSaver` captures the value of `errno` upon construction and restores it + // upon deletion. It is used in low-level code and must be super fast. 
Do not + // add instrumentation, even in debug modes. + class ErrnoSaver + { + public: + ErrnoSaver() : + saved_errno_(errno) + { + } + ~ErrnoSaver() + { + errno = saved_errno_; + } + int operator()() const + { + return saved_errno_; + } + + private: + const int saved_errno_; + }; + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h new file mode 100644 index 00000000..1c116812 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h @@ -0,0 +1,1345 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Utilities for testing exception-safety + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/base/internal/pretty_function.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/strings/substitute.h" +#include "absl/utility/utility.h" + +namespace testing +{ + + enum class TypeSpec; + enum class AllocSpec; + + constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); + } + + constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); + } + + constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); + } + + constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); + } + + namespace exceptions_internal + { + + std::string GetSpecString(TypeSpec); + std::string GetSpecString(AllocSpec); + + struct NoThrowTag + { + }; + struct StrongGuaranteeTagType + { + }; + + // A simple exception class. We throw this so that test code can catch + // exceptions specifically thrown by ThrowingValue. + class TestException + { + public: + explicit TestException(absl::string_view msg) : + msg_(msg) + { + } + virtual ~TestException() + { + } + virtual const char* what() const noexcept + { + return msg_.c_str(); + } + + private: + std::string msg_; + }; + + // TestBadAllocException exists because allocation functions must throw an + // exception which can be caught by a handler of std::bad_alloc. 
We use a child + // class of std::bad_alloc so we can customise the error message, and also + // derive from TestException so we don't accidentally end up catching an actual + // bad_alloc exception in TestExceptionSafety. + class TestBadAllocException : public std::bad_alloc, public TestException + { + public: + explicit TestBadAllocException(absl::string_view msg) : + TestException(msg) + { + } + using TestException::what; + }; + + extern int countdown; + + // Allows the countdown variable to be set manually (defaulting to the initial + // value of 0) + inline void SetCountdown(int i = 0) + { + countdown = i; + } + // Sets the countdown to the terminal value -1 + inline void UnsetCountdown() + { + SetCountdown(-1); + } + + void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); + + testing::AssertionResult FailureMessage(const TestException& e, int countdown) noexcept; + + struct TrackedAddress + { + bool is_alive; + std::string description; + }; + + // Inspects the constructions and destructions of anything inheriting from + // TrackedObject. This allows us to safely "leak" TrackedObjects, as + // ConstructorTracker will destroy everything left over in its destructor. 
+ class ConstructorTracker + { + public: + explicit ConstructorTracker(int count) : + countdown_(count) + { + assert(current_tracker_instance_ == nullptr); + current_tracker_instance_ = this; + } + + ~ConstructorTracker() + { + assert(current_tracker_instance_ == this); + current_tracker_instance_ = nullptr; + + for (auto& it : address_map_) + { + void* address = it.first; + TrackedAddress& tracked_address = it.second; + if (tracked_address.is_alive) + { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, countdown_, "Object was not destroyed."); + } + } + } + + static void ObjectConstructed(void* address, std::string description) + { + if (!CurrentlyTracking()) + return; + + TrackedAddress& tracked_address = + current_tracker_instance_->address_map_[address]; + if (tracked_address.is_alive) + { + ADD_FAILURE() << ErrorMessage( + address, tracked_address.description, current_tracker_instance_->countdown_, "Object was re-constructed. Current object was constructed by " + description + ); + } + tracked_address = {true, std::move(description)}; + } + + static void ObjectDestructed(void* address) + { + if (!CurrentlyTracking()) + return; + + auto it = current_tracker_instance_->address_map_.find(address); + // Not tracked. Ignore. 
+ if (it == current_tracker_instance_->address_map_.end()) + return; + + TrackedAddress& tracked_address = it->second; + if (!tracked_address.is_alive) + { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, current_tracker_instance_->countdown_, "Object was re-destroyed."); + } + tracked_address.is_alive = false; + } + + private: + static bool CurrentlyTracking() + { + return current_tracker_instance_ != nullptr; + } + + static std::string ErrorMessage(void* address, const std::string& address_description, int countdown, const std::string& error_description) + { + return absl::Substitute( + "With coundtown at $0:\n" + " $1\n" + " Object originally constructed by $2\n" + " Object address: $3\n", + countdown, + error_description, + address_description, + address + ); + } + + std::unordered_map address_map_; + int countdown_; + + static ConstructorTracker* current_tracker_instance_; + }; + + class TrackedObject + { + public: + TrackedObject(const TrackedObject&) = delete; + TrackedObject(TrackedObject&&) = delete; + + protected: + explicit TrackedObject(std::string description) + { + ConstructorTracker::ObjectConstructed(this, std::move(description)); + } + + ~TrackedObject() noexcept + { + ConstructorTracker::ObjectDestructed(this); + } + }; + } // namespace exceptions_internal + + extern exceptions_internal::NoThrowTag nothrow_ctor; + + extern exceptions_internal::StrongGuaranteeTagType strong_guarantee; + + // A test class which is convertible to bool. The conversion can be + // instrumented to throw at a controlled time. + class ThrowingBool + { + public: + ThrowingBool(bool b) noexcept : + b_(b) + { + } // NOLINT(runtime/explicit) + operator bool() const + { // NOLINT + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return b_; + } + + private: + bool b_; + }; + + /* + * Configuration enum for the ThrowingValue type that defines behavior for the + * lifetime of the instance. 
Use testing::nothrow_ctor to prevent the integer + * constructor from throwing. + * + * kEverythingThrows: Every operation can throw an exception + * kNoThrowCopy: Copy construction and copy assignment will not throw + * kNoThrowMove: Move construction and move assignment will not throw + * kNoThrowNew: Overloaded operators new and new[] will not throw + */ + enum class TypeSpec + { + kEverythingThrows = 0, + kNoThrowCopy = 1, + kNoThrowMove = 1 << 1, + kNoThrowNew = 1 << 2, + }; + + /* + * A testing class instrumented to throw an exception at a controlled time. + * + * ThrowingValue implements a slightly relaxed version of the Regular concept -- + * that is it's a value type with the expected semantics. It also implements + * arithmetic operations. It doesn't implement member and pointer operators + * like operator-> or operator[]. + * + * ThrowingValue can be instrumented to have certain operations be noexcept by + * using compile-time bitfield template arguments. That is, to make an + * ThrowingValue which has noexcept move construction/assignment and noexcept + * copy construction/assignment, use the following: + * ThrowingValue my_thrwr{val}; + */ + template + class ThrowingValue : private exceptions_internal::TrackedObject + { + static constexpr bool IsSpecified(TypeSpec spec) + { + return static_cast(Spec & spec); + } + + static constexpr int kDefaultValue = 0; + static constexpr int kBadValue = 938550620; + + public: + ThrowingValue() : + TrackedObject(GetInstanceString(kDefaultValue)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = kDefaultValue; + } + + ThrowingValue(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy) + ) : + TrackedObject(GetInstanceString(other.dummy_)) + { + if (!IsSpecified(TypeSpec::kNoThrowCopy)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + ThrowingValue(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove) + ) : + 
TrackedObject(GetInstanceString(other.dummy_)) + { + if (!IsSpecified(TypeSpec::kNoThrowMove)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + explicit ThrowingValue(int i) : + TrackedObject(GetInstanceString(i)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = i; + } + + ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept + : + TrackedObject(GetInstanceString(i)), + dummy_(i) + { + } + + // absl expects nothrow destructors + ~ThrowingValue() noexcept = default; + + ThrowingValue& operator=(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy) + ) + { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowCopy)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + ThrowingValue& operator=(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove) + ) + { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowMove)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + // Arithmetic Operators + ThrowingValue operator+(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); + } + + ThrowingValue operator+() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_, nothrow_ctor); + } + + ThrowingValue operator-(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); + } + + ThrowingValue operator-() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(-dummy_, nothrow_ctor); + } + + ThrowingValue& operator++() + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + ++dummy_; + return *this; + } + + ThrowingValue operator++(int) + { + 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + ++dummy_; + return out; + } + + ThrowingValue& operator--() + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + --dummy_; + return *this; + } + + ThrowingValue operator--(int) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + --dummy_; + return out; + } + + ThrowingValue operator*(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); + } + + ThrowingValue operator/(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); + } + + ThrowingValue operator%(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); + } + + ThrowingValue operator<<(int shift) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ << shift, nothrow_ctor); + } + + ThrowingValue operator>>(int shift) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ >> shift, nothrow_ctor); + } + + // Comparison Operators + // NOTE: We use `ThrowingBool` instead of `bool` because most STL + // types/containers requires T to be convertible to bool. 
+ friend ThrowingBool operator==(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ == b.dummy_; + } + friend ThrowingBool operator!=(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ != b.dummy_; + } + friend ThrowingBool operator<(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ < b.dummy_; + } + friend ThrowingBool operator<=(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ <= b.dummy_; + } + friend ThrowingBool operator>(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ > b.dummy_; + } + friend ThrowingBool operator>=(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ >= b.dummy_; + } + + // Logical Operators + ThrowingBool operator!() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return !dummy_; + } + + ThrowingBool operator&&(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ && other.dummy_; + } + + ThrowingBool operator||(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ || other.dummy_; + } + + // Bitwise Logical Operators + ThrowingValue operator~() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(~dummy_, nothrow_ctor); + } + + ThrowingValue operator&(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); + } + + ThrowingValue operator|(const ThrowingValue& other) const + { + 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); + } + + ThrowingValue operator^(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); + } + + // Compound Assignment operators + ThrowingValue& operator+=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ += other.dummy_; + return *this; + } + + ThrowingValue& operator-=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ -= other.dummy_; + return *this; + } + + ThrowingValue& operator*=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ *= other.dummy_; + return *this; + } + + ThrowingValue& operator/=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ /= other.dummy_; + return *this; + } + + ThrowingValue& operator%=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ %= other.dummy_; + return *this; + } + + ThrowingValue& operator&=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ &= other.dummy_; + return *this; + } + + ThrowingValue& operator|=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ |= other.dummy_; + return *this; + } + + ThrowingValue& operator^=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ ^= other.dummy_; + return *this; + } + + ThrowingValue& operator<<=(int shift) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ <<= shift; + return *this; + } + + ThrowingValue& operator>>=(int shift) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ >>= shift; + return *this; + } + + // Pointer operators + void operator&() const 
= delete; // NOLINT(runtime/operator) + + // Stream operators + friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return os << GetInstanceString(tv.dummy_); + } + + friend std::istream& operator>>(std::istream& is, const ThrowingValue&) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return is; + } + + // Memory management operators + static void* operator new(size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s); + } + + static void* operator new[](size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s); + } + + template + static void* operator new(size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s, std::forward(args)...); + } + + template + static void* operator new[](size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s, std::forward(args)...); + } + + // Abseil doesn't support throwing overloaded operator delete. These are + // provided so a throwing operator-new can clean up after itself. + void operator delete(void* p) noexcept + { + ::operator delete(p); + } + + template + void operator delete(void* p, Args&&... args) noexcept + { + ::operator delete(p, std::forward(args)...); + } + + void operator delete[](void* p) noexcept + { + return ::operator delete[](p); + } + + template + void operator delete[](void* p, Args&&... 
args) noexcept + { + return ::operator delete[](p, std::forward(args)...); + } + + // Non-standard access to the actual contained value. No need for this to + // throw. + int& Get() noexcept + { + return dummy_; + } + const int& Get() const noexcept + { + return dummy_; + } + + private: + static std::string GetInstanceString(int dummy) + { + return absl::StrCat("ThrowingValue<", exceptions_internal::GetSpecString(Spec), ">(", dummy, ")"); + } + + int dummy_; + }; + // While not having to do with exceptions, explicitly delete comma operator, to + // make sure we don't use it on user-supplied types. + template + void operator,(const ThrowingValue&, T&&) = delete; + template + void operator,(T&&, const ThrowingValue&) = delete; + + /* + * Configuration enum for the ThrowingAllocator type that defines behavior for + * the lifetime of the instance. + * + * kEverythingThrows: Calls to the member functions may throw + * kNoThrowAllocate: Calls to the member functions will not throw + */ + enum class AllocSpec + { + kEverythingThrows = 0, + kNoThrowAllocate = 1, + }; + + /* + * An allocator type which is instrumented to throw at a controlled time, or not + * to throw, using AllocSpec. The supported settings are the default of every + * function which is allowed to throw in a conforming allocator possibly + * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS + * configuration macro. 
+ */ + template + class ThrowingAllocator : private exceptions_internal::TrackedObject + { + static constexpr bool IsSpecified(AllocSpec spec) + { + return static_cast(Spec & spec); + } + + public: + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using void_pointer = void*; + using const_void_pointer = const void*; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using is_nothrow = + std::integral_constant; + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::false_type; + + ThrowingAllocator() : + TrackedObject(GetInstanceString(next_id_)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = std::make_shared(next_id_++); + } + + template + ThrowingAllocator(const ThrowingAllocator& other) noexcept // NOLINT + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) + { + } + + // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of + // allocator shall not exit via an exception, thus they are marked noexcept. 
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) + { + } + + template + ThrowingAllocator(ThrowingAllocator&& other) noexcept // NOLINT + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) + { + } + + ThrowingAllocator(ThrowingAllocator&& other) noexcept + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) + { + } + + ~ThrowingAllocator() noexcept = default; + + ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept + { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=( + const ThrowingAllocator& other + ) noexcept + { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=(ThrowingAllocator&& other) noexcept + { + dummy_ = std::move(other.State()); + return *this; + } + + template + struct rebind + { + using other = ThrowingAllocator; + }; + + pointer allocate(size_type n) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return static_cast(::operator new(n * sizeof(T))); + } + + pointer allocate(size_type n, const_void_pointer) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + return allocate(n); + } + + void deallocate(pointer ptr, size_type) noexcept + { + ReadState(); + ::operator delete(static_cast(ptr)); + } + + template + void construct(U* ptr, Args&&... 
args) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + ::new (static_cast(ptr)) U(std::forward(args)...); + } + + template + void destroy(U* p) noexcept + { + ReadState(); + p->~U(); + } + + size_type max_size() const noexcept + { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + ThrowingAllocator select_on_container_copy_construction() noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return *this; + } + + template + bool operator==(const ThrowingAllocator& other) const noexcept + { + return dummy_ == other.dummy_; + } + + template + bool operator!=(const ThrowingAllocator& other) const noexcept + { + return dummy_ != other.dummy_; + } + + template + friend class ThrowingAllocator; + + private: + static std::string GetInstanceString(int dummy) + { + return absl::StrCat("ThrowingAllocator<", exceptions_internal::GetSpecString(Spec), ">(", dummy, ")"); + } + + const std::shared_ptr& State() const + { + return dummy_; + } + std::shared_ptr& State() + { + return dummy_; + } + + void ReadState() + { + // we know that this will never be true, but the compiler doesn't, so this + // should safely force a read of the value. + if (*dummy_ < 0) + std::abort(); + } + + void ReadStateAndMaybeThrow(absl::string_view msg) const + { + if (!IsSpecified(AllocSpec::kNoThrowAllocate)) + { + exceptions_internal::MaybeThrow( + absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg) + ); + } + } + + static int next_id_; + std::shared_ptr dummy_; + }; + + template + int ThrowingAllocator::next_id_ = 0; + + // Tests for resource leaks by attempting to construct a T using args repeatedly + // until successful, using the countdown method. Side effects can then be + // tested for resource leaks. + template + void TestThrowingCtor(Args&&... 
args) + { + struct Cleanup + { + ~Cleanup() + { + exceptions_internal::UnsetCountdown(); + } + } c; + for (int count = 0;; ++count) + { + exceptions_internal::ConstructorTracker ct(count); + exceptions_internal::SetCountdown(count); + try + { + T temp(std::forward(args)...); + static_cast(temp); + break; + } + catch (const exceptions_internal::TestException&) + { + } + } + } + + // Tests the nothrow guarantee of the provided nullary operation. If the an + // exception is thrown, the result will be AssertionFailure(). Otherwise, it + // will be AssertionSuccess(). + template + testing::AssertionResult TestNothrowOp(const Operation& operation) + { + struct Cleanup + { + Cleanup() + { + exceptions_internal::SetCountdown(); + } + ~Cleanup() + { + exceptions_internal::UnsetCountdown(); + } + } c; + try + { + operation(); + return testing::AssertionSuccess(); + } + catch (const exceptions_internal::TestException&) + { + return testing::AssertionFailure() + << "TestException thrown during call to operation() when nothrow " + "guarantee was expected."; + } + catch (...) + { + return testing::AssertionFailure() + << "Unknown exception thrown during call to operation() when " + "nothrow guarantee was expected."; + } + } + + namespace exceptions_internal + { + + // Dummy struct for ExceptionSafetyTestBuilder<> partial state. + struct UninitializedT + { + }; + + template + class DefaultFactory + { + public: + explicit DefaultFactory(const T& t) : + t_(t) + { + } + std::unique_ptr operator()() const + { + return absl::make_unique(t_); + } + + private: + T t_; + }; + + template + using EnableIfTestable = typename absl::enable_if_t< + LazyContractsCount != 0 && + !std::is_same::value && + !std::is_same::value>; + + template + class ExceptionSafetyTestBuilder; + + } // namespace exceptions_internal + + /* + * Constructs an empty ExceptionSafetyTestBuilder. 
All + * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation + * methods return new instances of ExceptionSafetyTestBuilder. + * + * In order to test a T for exception safety, a factory for that T, a testable + * operation, and at least one contract callback returning an assertion + * result must be applied using the respective methods. + */ + exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester(); + + namespace exceptions_internal + { + template + struct IsUniquePtr : std::false_type + { + }; + + template + struct IsUniquePtr> : std::true_type + { + }; + + template + struct FactoryPtrTypeHelper + { + using type = decltype(std::declval()()); + + static_assert(IsUniquePtr::value, "Factories must return a unique_ptr"); + }; + + template + using FactoryPtrType = typename FactoryPtrTypeHelper::type; + + template + using FactoryElementType = typename FactoryPtrType::element_type; + + template + class ExceptionSafetyTest + { + using Factory = std::function()>; + using Operation = std::function; + using Contract = std::function; + + public: + template + explicit ExceptionSafetyTest(const Factory& f, const Operation& op, const Contracts&... contracts) : + factory_(f), + operation_(op), + contracts_{WrapContract(contracts)...} + { + } + + AssertionResult Test() const + { + for (int count = 0;; ++count) + { + exceptions_internal::ConstructorTracker ct(count); + + for (const auto& contract : contracts_) + { + auto t_ptr = factory_(); + try + { + SetCountdown(count); + operation_(t_ptr.get()); + // Unset for the case that the operation throws no exceptions, which + // would leave the countdown set and break the *next* exception safety + // test after this one. 
+ UnsetCountdown(); + return AssertionSuccess(); + } + catch (const exceptions_internal::TestException& e) + { + if (!contract(t_ptr.get())) + { + return AssertionFailure() << e.what() << " failed contract check"; + } + } + } + } + } + + private: + template + Contract WrapContract(const ContractFn& contract) + { + return [contract](T* t_ptr) + { return AssertionResult(contract(t_ptr)); }; + } + + Contract WrapContract(StrongGuaranteeTagType) + { + return [this](T* t_ptr) + { return AssertionResult(*factory_() == *t_ptr); }; + } + + Factory factory_; + Operation operation_; + std::vector contracts_; + }; + + /* + * Builds a tester object that tests if performing a operation on a T follows + * exception safety guarantees. Verification is done via contract assertion + * callbacks applied to T instances post-throw. + * + * Template parameters for ExceptionSafetyTestBuilder: + * + * - Factory: The factory object (passed in via tester.WithFactory(...) or + * tester.WithInitialValue(...)) must be invocable with the signature + * `std::unique_ptr operator()() const` where T is the type being tested. + * It is used for reliably creating identical T instances to test on. + * + * - Operation: The operation object (passed in via tester.WithOperation(...) + * or tester.Test(...)) must be invocable with the signature + * `void operator()(T*) const` where T is the type being tested. It is used + * for performing steps on a T instance that may throw and that need to be + * checked for exception safety. Each call to the operation will receive a + * fresh T instance so it's free to modify and destroy the T instances as it + * pleases. + * + * - Contracts...: The contract assertion callback objects (passed in via + * tester.WithContracts(...)) must be invocable with the signature + * `testing::AssertionResult operator()(T*) const` where T is the type being + * tested. Contract assertion callbacks are provided T instances post-throw. 
+ * They must return testing::AssertionSuccess when the type contracts of the + * provided T instance hold. If the type contracts of the T instance do not + * hold, they must return testing::AssertionFailure. Execution order of + * Contracts... is unspecified. They will each individually get a fresh T + * instance so they are free to modify and destroy the T instances as they + * please. + */ + template + class ExceptionSafetyTestBuilder + { + public: + /* + * Returns a new ExceptionSafetyTestBuilder with an included T factory based + * on the provided T instance. The existing factory will not be included in + * the newly created tester instance. The created factory returns a new T + * instance by copy-constructing the provided const T& t. + * + * Preconditions for tester.WithInitialValue(const T& t): + * + * - The const T& t object must be copy-constructible where T is the type + * being tested. For non-copy-constructible objects, use the method + * tester.WithFactory(...). + */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithInitialValue(const T& t) const + { + return WithFactory(DefaultFactory(t)); + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided T factory + * included. The existing factory will not be included in the newly-created + * tester instance. This method is intended for use with types lacking a copy + * constructor. Types that can be copy-constructed should instead use the + * method tester.WithInitialValue(...). + */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithFactory(const NewFactory& new_factory) const + { + return {new_factory, operation_, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided testable + * operation included. The existing operation will not be included in the + * newly created tester. 
+ */ + template + ExceptionSafetyTestBuilder, Contracts...> + WithOperation(const NewOperation& new_operation) const + { + return {factory_, new_operation, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... + * combined with the Contracts... that were already included in the instance + * on which the method was called. Contracts... cannot be removed or replaced + * once added to an ExceptionSafetyTestBuilder instance. A fresh object must + * be created in order to get an empty Contracts... list. + * + * In addition to passing in custom contract assertion callbacks, this method + * accepts `testing::strong_guarantee` as an argument which checks T instances + * post-throw against freshly created T instances via operator== to verify + * that any state changes made during the execution of the operation were + * properly rolled back. + */ + template + ExceptionSafetyTestBuilder...> + WithContracts(const MoreContracts&... more_contracts) const + { + return { + factory_, operation_, std::tuple_cat(contracts_, std::tuple...>(more_contracts...))}; + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * The passed-in testable operation will not be saved in a new tester instance + * nor will it modify/replace the existing tester instance. This is useful + * when each operation being tested is unique and does not need to be reused. + * + * Preconditions for tester.Test(const NewOperation& new_operation): + * + * - May only be called after at least one contract assertion callback and a + * factory or initial value have been provided. 
+ */ + template< + typename NewOperation, + typename = EnableIfTestable> + testing::AssertionResult Test(const NewOperation& new_operation) const + { + return TestImpl(new_operation, absl::index_sequence_for()); + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * Preconditions for tester.Test(): + * + * - May only be called after at least one contract assertion callback, a + * factory or initial value and a testable operation have been provided. + */ + template< + typename LazyOperation = Operation, + typename = EnableIfTestable> + testing::AssertionResult Test() const + { + return Test(operation_); + } + + private: + template + friend class ExceptionSafetyTestBuilder; + + friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); + + ExceptionSafetyTestBuilder() + { + } + + ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, const std::tuple& i) : + factory_(f), + operation_(o), + contracts_(i) + { + } + + template + testing::AssertionResult TestImpl(SelectedOperation selected_operation, absl::index_sequence) const + { + return ExceptionSafetyTest>( + factory_, selected_operation, std::get(contracts_)... 
+ ) + .Test(); + } + + Factory factory_; + Operation operation_; + std::tuple contracts_; + }; + + } // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h new file mode 100644 index 00000000..4ac46ad5 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h @@ -0,0 +1,42 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Testing utilities for ABSL types which throw exceptions. + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception +// if exceptions are enabled, or for death with a specified text in the error +// message +#ifdef ABSL_HAVE_EXCEPTIONS + +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_THROW(expr, exception_t) + +#elif defined(__ANDROID__) +// Android asserts do not log anywhere that gtest can currently inspect. +// So we expect exit, but cannot match the message. 
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH(expr, ".*") +#else +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH_IF_SUPPORTED(expr, text) + +#endif + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h b/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h new file mode 100644 index 00000000..6f199d12 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h @@ -0,0 +1,54 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ +#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + template + struct FastTypeTag + { + constexpr static char dummy_var = 0; + }; + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL + template + constexpr char FastTypeTag::dummy_var; +#endif + + // FastTypeId() evaluates at compile/link-time to a unique pointer for the + // passed-in type. These are meant to be good match for keys into maps or + // straight up comparisons. 
+ using FastTypeIdType = const void*; + + template + constexpr inline FastTypeIdType FastTypeId() + { + return &FastTypeTag::dummy_var; + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h b/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h new file mode 100644 index 00000000..4249bc29 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h @@ -0,0 +1,56 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_ +#define ABSL_BASE_INTERNAL_HIDE_PTR_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Arbitrary value with high bits set. Xor'ing with it is unlikely + // to map one valid pointer to another valid pointer. + constexpr uintptr_t HideMask() + { + return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU; + } + + // Hide a pointer from the leak checker. For internal use only. + // Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr + // and all objects reachable from ptr to be ignored by the leak checker. + template + inline uintptr_t HidePtr(T* ptr) + { + return reinterpret_cast(ptr) ^ HideMask(); + } + + // Return a pointer that has been hidden from the leak checker. + // For internal use only. 
+ template + inline T* UnhidePtr(uintptr_t hidden) + { + return reinterpret_cast(hidden ^ HideMask()); + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/identity.h b/CAPI/cpp/grpc/include/absl/base/internal/identity.h new file mode 100644 index 00000000..d62e5562 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/identity.h @@ -0,0 +1,40 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_ +#define ABSL_BASE_INTERNAL_IDENTITY_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace internal + { + + template + struct identity + { + typedef T type; + }; + + template + using identity_t = typename identity::type; + + } // namespace internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_IDENTITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h new file mode 100644 index 00000000..b38f0b02 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_ +#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_ + +#include + +#include "absl/base/internal/identity.h" + +// File: +// This file define a macro that allows the creation of or emulation of C++17 +// inline variables based on whether or not the feature is supported. + +//////////////////////////////////////////////////////////////////////////////// +// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) +// +// Description: +// Expands to the equivalent of an inline constexpr instance of the specified +// `type` and `name`, initialized to the value `init`. If the compiler being +// used is detected as supporting actual inline variables as a language +// feature, then the macro expands to an actual inline variable definition. +// +// Requires: +// `type` is a type that is usable in an extern variable declaration. +// +// Requires: `name` is a valid identifier +// +// Requires: +// `init` is an expression that can be used in the following definition: +// constexpr type name = init; +// +// Usage: +// +// // Equivalent to: `inline constexpr size_t variant_npos = -1;` +// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1); +// +// Differences in implementation: +// For a direct, language-level inline variable, decltype(name) will be the +// type that was specified along with const qualification, whereas for +// emulated inline variables, decltype(name) may be different (in practice +// it will likely be a reference type). 
+//////////////////////////////////////////////////////////////////////////////// + +#ifdef __cpp_inline_variables + +// Clang's -Wmissing-variable-declarations option erroneously warned that +// inline constexpr objects need to be pre-declared. This has now been fixed, +// but we will need to support this workaround for people building with older +// versions of clang. +// +// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862 +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. +#if defined(__clang__) +#define ABSL_INTERNAL_EXTERN_DECL(type, name) \ + extern const ::absl::internal::identity_t name; +#else // Otherwise, just define the macro to do nothing. +#define ABSL_INTERNAL_EXTERN_DECL(type, name) +#endif // defined(__clang__) + +// See above comment at top of file for details. +#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \ + ABSL_INTERNAL_EXTERN_DECL(type, name) \ + inline constexpr ::absl::internal::identity_t name = init + +#else + +// See above comment at top of file for details. +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. 
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ + template \ + struct AbslInternalInlineVariableHolder##name \ + { \ + static constexpr ::absl::internal::identity_t kInstance = init; \ + }; \ + \ + template \ + constexpr ::absl::internal::identity_t \ + AbslInternalInlineVariableHolder##name::kInstance; \ + \ + static constexpr const ::absl::internal::identity_t& \ + name = /* NOLINT */ \ + AbslInternalInlineVariableHolder##name<>::kInstance; \ + static_assert(sizeof(void (*)(decltype(name))) != 0, "Silence unused variable warnings.") + +#endif // __cpp_inline_variables + +#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h new file mode 100644 index 00000000..093eb48d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h @@ -0,0 +1,49 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ +#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ + +#include "absl/base/internal/inline_variable.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace inline_variable_testing_internal + { + + struct Foo + { + int value = 5; + }; + + ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {}); + + ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5); + ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5); + + ABSL_INTERNAL_INLINE_CONSTEXPR(void (*)(), inline_variable_fun_ptr, nullptr); + + const Foo& get_foo_a(); + const Foo& get_foo_b(); + + const int& get_int_a(); + const int& get_int_b(); + + } // namespace inline_variable_testing_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/invoke.h b/CAPI/cpp/grpc/include/absl/base/internal/invoke.h new file mode 100644 index 00000000..2316378d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/invoke.h @@ -0,0 +1,267 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// absl::base_internal::invoke(f, args...) is an implementation of +// INVOKE(f, args...) from section [func.require] of the C++ standard. 
+// When compiled as C++17 and later versions, it is implemented as an alias of +// std::invoke. +// +// [func.require] +// Define INVOKE (f, t1, t2, ..., tN) as follows: +// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T; +// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item; +// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T; +// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item; +// 5. f(t1, t2, ..., tN) in all other cases. +// +// The implementation is SFINAE-friendly: substitution failure within invoke() +// isn't an error. + +#ifndef ABSL_BASE_INTERNAL_INVOKE_H_ +#define ABSL_BASE_INTERNAL_INVOKE_H_ + +#include "absl/base/config.h" + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#include + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + using std::invoke; + using std::invoke_result_t; + using std::is_invocable_r; + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#include +#include +#include + +#include "absl/meta/type_traits.h" + +// The following code is internal implementation detail. See the comment at the +// top of this file for the API documentation. + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // The five classes below each implement one of the clauses from the definition + // of INVOKE. The inner class template Accept checks whether the + // clause is applicable; static function template Invoke(f, args...) 
does the + // invocation. + // + // By separating the clause selection logic from invocation we make sure that + // Invoke() does exactly what the standard says. + + template + struct StrippedAccept + { + template + struct Accept : Derived::template AcceptImpl::type>::type...> + { + }; + }; + + // (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T + // and t1 is an object of type T or a reference to an object of type T or a + // reference to an object of a type derived from T. + struct MemFunAndRef : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && absl::is_function::value> + { + }; + + template + static decltype((std::declval().*std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) + { +// Ignore bogus GCC warnings on this line. +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return (std::forward(obj).*std::forward(mem_fun))(std::forward(args)...); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic pop +#endif + } + }; + + // ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a + // class T and t1 is not one of the types described in the previous item. + struct MemFunAndPtr : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && absl::is_function::value> + { + }; + + template + static decltype(((*std::declval()).*std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... 
args) + { + return ((*std::forward(ptr)).*std::forward(mem_fun))(std::forward(args)...); + } + }; + + // t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is + // an object of type T or a reference to an object of type T or a reference + // to an object of a type derived from T. + struct DataMemAndRef : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && !absl::is_function::value> + { + }; + + template + static decltype(std::declval().*std::declval()) Invoke( + DataMem&& data_mem, Ref&& ref + ) + { + return std::forward(ref).*std::forward(data_mem); + } + }; + + // (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 + // is not one of the types described in the previous item. + struct DataMemAndPtr : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && !absl::is_function::value> + { + }; + + template + static decltype((*std::declval()).*std::declval()) Invoke( + DataMem&& data_mem, Ptr&& ptr + ) + { + return (*std::forward(ptr)).*std::forward(data_mem); + } + }; + + // f(t1, t2, ..., tN) in all other cases. + struct Callable + { + // Callable doesn't have Accept because it's the last clause that gets picked + // when none of the previous clauses are applicable. + template + static decltype(std::declval()(std::declval()...)) Invoke( + F&& f, Args&&... args + ) + { + return std::forward(f)(std::forward(args)...); + } + }; + + // Resolves to the first matching clause. 
+ template + struct Invoker + { + typedef typename std::conditional< + MemFunAndRef::Accept::value, + MemFunAndRef, + typename std::conditional< + MemFunAndPtr::Accept::value, + MemFunAndPtr, + typename std::conditional< + DataMemAndRef::Accept::value, + DataMemAndRef, + typename std::conditional::value, DataMemAndPtr, Callable>::type>::type>:: + type>::type type; + }; + + // The result type of Invoke. + template + using invoke_result_t = decltype(Invoker::type::Invoke( + std::declval(), std::declval()... + )); + + // Invoke(f, args...) is an implementation of INVOKE(f, args...) from section + // [func.require] of the C++ standard. + template + invoke_result_t invoke(F&& f, Args&&... args) + { + return Invoker::type::Invoke(std::forward(f), std::forward(args)...); + } + + template + struct IsInvocableRImpl : std::false_type + { + }; + + template + struct IsInvocableRImpl< + absl::void_t>, + R, + F, + Args...> : std::integral_constant, R>::value || std::is_void::value> + { + }; + + // Type trait whose member `value` is true if invoking `F` with `Args` is valid, + // and either the return type is convertible to `R`, or `R` is void. + // C++11-compatible version of `std::is_invocable_r`. + template + using is_invocable_r = IsInvocableRImpl; + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h b/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h new file mode 100644 index 00000000..a7b62733 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h @@ -0,0 +1,131 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ +#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ + +// A simple thread-safe memory allocator that does not depend on +// mutexes or thread-specific data. It is intended to be used +// sparingly, and only when malloc() would introduce an unwanted +// dependency, such as inside the heap-checker, or the Mutex +// implementation. + +// IWYU pragma: private, include "base/low_level_alloc.h" + +#include + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// LowLevelAlloc requires that the platform support low-level +// allocation of virtual memory. Platforms lacking this cannot use +// LowLevelAlloc. +#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING +#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set +#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32) +#define ABSL_LOW_LEVEL_ALLOC_MISSING 1 +#endif + +// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or +// asm.js / WebAssembly. +// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html +// for more information. 
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set +#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) || \ + defined(__hexagon__) +#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1 +#endif + +#include + +#include "absl/base/port.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + class LowLevelAlloc + { + public: + struct Arena; // an arena from which memory may be allocated + + // Returns a pointer to a block of at least "request" bytes + // that have been newly allocated from the specific arena. + // for Alloc() call the DefaultArena() is used. + // Returns 0 if passed request==0. + // Does not return 0 under other circumstances; it crashes if memory + // is not available. + static void* Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook); + static void* AllocWithArena(size_t request, Arena* arena) + ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // Deallocates a region of memory that was previously allocated with + // Alloc(). Does nothing if passed 0. "s" must be either 0, + // or must have been returned from a call to Alloc() and not yet passed to + // Free() since that call to Alloc(). The space is returned to the arena + // from which it was allocated. + static void Free(void* s) ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free + // are to put all callers of MallocHook::Invoke* in this module + // into special section, + // so that MallocHook::GetCallerStackTrace can function accurately. + + // Create a new arena. + // The root metadata for the new arena is allocated in the + // meta_data_arena; the DefaultArena() can be passed for meta_data_arena. + // These values may be ored into flags: + enum + { + // Report calls to Alloc() and Free() via the MallocHook interface. + // Set in the DefaultArena. 
+ kCallMallocHook = 0x0001, + +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + // Make calls to Alloc(), Free() be async-signal-safe. Not set in + // DefaultArena(). Not supported on all platforms. + kAsyncSignalSafe = 0x0002, +#endif + }; + // Construct a new arena. The allocation of the underlying metadata honors + // the provided flags. For example, the call NewArena(kAsyncSignalSafe) + // is itself async-signal-safe, as well as generatating an arena that provides + // async-signal-safe Alloc/Free. + static Arena* NewArena(uint32_t flags); + + // Destroys an arena allocated by NewArena and returns true, + // provided no allocated blocks remain in the arena. + // If allocated blocks remain in the arena, does nothing and + // returns false. + // It is illegal to attempt to destroy the DefaultArena(). + static bool DeleteArena(Arena* arena); + + // The default arena that always exists. + static Arena* DefaultArena(); + + private: + LowLevelAlloc(); // no instances + }; + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h b/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h new file mode 100644 index 00000000..ed2f3111 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h @@ -0,0 +1,152 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Core interfaces and definitions used by by low-level interfaces such as +// SpinLock. + +#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ +#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ + +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/macros.h" + +// The following two declarations exist so SchedulingGuard may friend them with +// the appropriate language linkage. These callbacks allow libc internals, such +// as function level statics, to schedule cooperatively when locking. +extern "C" bool __google_disable_rescheduling(void); +extern "C" void __google_enable_rescheduling(bool disable_result); + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + class CondVar; + class Mutex; + + namespace synchronization_internal + { + int MutexDelay(int32_t c, int mode); + } // namespace synchronization_internal + + namespace base_internal + { + + class SchedulingHelper; // To allow use of SchedulingGuard. + class SpinLock; // To allow use of SchedulingGuard. + + // SchedulingGuard + // Provides guard semantics that may be used to disable cooperative rescheduling + // of the calling thread within specific program blocks. This is used to + // protect resources (e.g. low-level SpinLocks or Domain code) that cooperative + // scheduling depends on. + // + // Domain implementations capable of rescheduling in reaction to involuntary + // kernel thread actions (e.g blocking due to a pagefault or syscall) must + // guarantee that an annotated thread is not allowed to (cooperatively) + // reschedule until the annotated region is complete. + // + // It is an error to attempt to use a cooperatively scheduled resource (e.g. + // Mutex) within a rescheduling-disabled region. + // + // All methods are async-signal safe. 
+ class SchedulingGuard + { + public: + // Returns true iff the calling thread may be cooperatively rescheduled. + static bool ReschedulingIsAllowed(); + SchedulingGuard(const SchedulingGuard&) = delete; + SchedulingGuard& operator=(const SchedulingGuard&) = delete; + + private: + // Disable cooperative rescheduling of the calling thread. It may still + // initiate scheduling operations (e.g. wake-ups), however, it may not itself + // reschedule. Nestable. The returned result is opaque, clients should not + // attempt to interpret it. + // REQUIRES: Result must be passed to a pairing EnableScheduling(). + static bool DisableRescheduling(); + + // Marks the end of a rescheduling disabled region, previously started by + // DisableRescheduling(). + // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling(). + static void EnableRescheduling(bool disable_result); + + // A scoped helper for {Disable, Enable}Rescheduling(). + // REQUIRES: destructor must run in same thread as constructor. + struct ScopedDisable + { + ScopedDisable() + { + disabled = SchedulingGuard::DisableRescheduling(); + } + ~ScopedDisable() + { + SchedulingGuard::EnableRescheduling(disabled); + } + + bool disabled; + }; + + // A scoped helper to enable rescheduling temporarily. + // REQUIRES: destructor must run in same thread as constructor. + class ScopedEnable + { + public: + ScopedEnable(); + ~ScopedEnable(); + + private: + int scheduling_disabled_depth_; + }; + + // Access to SchedulingGuard is explicitly permitted. + friend class absl::CondVar; + friend class absl::Mutex; + friend class SchedulingHelper; + friend class SpinLock; + friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); + }; + + //------------------------------------------------------------------------------ + // End of public interfaces. 
+ //------------------------------------------------------------------------------ + + inline bool SchedulingGuard::ReschedulingIsAllowed() + { + return false; + } + + inline bool SchedulingGuard::DisableRescheduling() + { + return false; + } + + inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) + { + return; + } + + inline SchedulingGuard::ScopedEnable::ScopedEnable() : + scheduling_disabled_depth_(0) + { + } + inline SchedulingGuard::ScopedEnable::~ScopedEnable() + { + ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning"); + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/nullability_impl.h b/CAPI/cpp/grpc/include/absl/base/internal/nullability_impl.h new file mode 100644 index 00000000..a8842b02 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/nullability_impl.h @@ -0,0 +1,112 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_ +#define ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + + namespace nullability_internal + { + + // `IsNullabilityCompatible` checks whether its first argument is a class + // explicitly tagged as supporting nullability annotations. 
The tag is the type + // declaration `absl_nullability_compatible`. + template + struct IsNullabilityCompatible : std::false_type + { + }; + + template + struct IsNullabilityCompatible< + T, + absl::void_t> : std::true_type + { + }; + + template + constexpr bool IsSupportedType = IsNullabilityCompatible::value; + + template + constexpr bool IsSupportedType = true; + + template + constexpr bool IsSupportedType = true; + + template + constexpr bool IsSupportedType> = true; + + template + constexpr bool IsSupportedType> = true; + + template + struct EnableNullable + { + static_assert(nullability_internal::IsSupportedType>, "Template argument must be a raw or supported smart pointer " + "type. See absl/base/nullability.h."); + using type = T; + }; + + template + struct EnableNonnull + { + static_assert(nullability_internal::IsSupportedType>, "Template argument must be a raw or supported smart pointer " + "type. See absl/base/nullability.h."); + using type = T; + }; + + template + struct EnableNullabilityUnknown + { + static_assert(nullability_internal::IsSupportedType>, "Template argument must be a raw or supported smart pointer " + "type. See absl/base/nullability.h."); + using type = T; + }; + + // Note: we do not apply Clang nullability attributes (e.g. _Nullable). These + // only support raw pointers, and conditionally enabling them only for raw + // pointers inhibits template arg deduction. Ideally, they would support all + // pointer-like types. 
+ template::type> + using NullableImpl +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) + [[clang::annotate("Nullable")]] +#endif + = T; + + template::type> + using NonnullImpl +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) + [[clang::annotate("Nonnull")]] +#endif + = T; + + template::type> + using NullabilityUnknownImpl +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) + [[clang::annotate("Nullability_Unspecified")]] +#endif + = T; + + } // namespace nullability_internal +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/per_thread_tls.h b/CAPI/cpp/grpc/include/absl/base/internal/per_thread_tls.h new file mode 100644 index 00000000..cf5e97a0 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/per_thread_tls.h @@ -0,0 +1,52 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ +#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ + +// This header defines two macros: +// +// If the platform supports thread-local storage: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a +// thread-local variable +// * ABSL_PER_THREAD_TLS is 1 +// +// Otherwise: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is empty +// * ABSL_PER_THREAD_TLS is 0 +// +// Microsoft C supports thread-local storage. 
+// GCC supports it if the appropriate version of glibc is available, +// which the programmer can indicate by defining ABSL_HAVE_TLS + +#include "absl/base/port.h" // For ABSL_HAVE_TLS + +#if defined(ABSL_PER_THREAD_TLS) +#error ABSL_PER_THREAD_TLS cannot be directly set +#elif defined(ABSL_PER_THREAD_TLS_KEYWORD) +#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set +#elif defined(ABSL_HAVE_TLS) +#define ABSL_PER_THREAD_TLS_KEYWORD __thread +#define ABSL_PER_THREAD_TLS 1 +#elif defined(_MSC_VER) +#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread) +#define ABSL_PER_THREAD_TLS 1 +#else +#define ABSL_PER_THREAD_TLS_KEYWORD +#define ABSL_PER_THREAD_TLS 0 +#endif + +#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h b/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h new file mode 100644 index 00000000..6aedb272 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h @@ -0,0 +1,149 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(b/265984188): remove all uses and delete this header. 
+ +#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_ +#define ABSL_BASE_INTERNAL_PREFETCH_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/prefetch.h" + +#ifdef __SSE__ +#include +#endif + +#if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE) +#include +#pragma intrinsic(_mm_prefetch) +#endif + +// Compatibility wrappers around __builtin_prefetch, to prefetch data +// for read if supported by the toolchain. + +// Move data into the cache before it is read, or "prefetch" it. +// +// The value of `addr` is the address of the memory to prefetch. If +// the target and compiler support it, data prefetch instructions are +// generated. If the prefetch is done some time before the memory is +// read, it may be in the cache by the time the read occurs. +// +// The function names specify the temporal locality heuristic applied, +// using the names of Intel prefetch instructions: +// +// T0 - high degree of temporal locality; data should be left in as +// many levels of the cache possible +// T1 - moderate degree of temporal locality +// T2 - low degree of temporal locality +// Nta - no temporal locality, data need not be left in the cache +// after the read +// +// Incorrect or gratuitous use of these functions can degrade +// performance, so use them only when representative benchmarks show +// an improvement. +// +// Example usage: +// +// absl::base_internal::PrefetchT0(addr); +// +// Currently, the different prefetch calls behave on some Intel +// architectures as follows: +// +// SNB..SKL SKX +// PrefetchT0() L1/L2/L3 L1/L2 +// PrefetchT1() L2/L3 L2 +// PrefetchT2() L2/L3 L2 +// PrefetchNta() L1/--/L3 L1* +// +// * On SKX PrefetchNta() will bring the line into L1 but will evict +// from L3 cache. This might result in surprising behavior. +// +// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon. 
+// +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead") + inline void PrefetchT0(const void* address) + { + absl::PrefetchToLocalCache(address); + } + + ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead") + inline void PrefetchNta(const void* address) + { + absl::PrefetchToLocalCacheNta(address); + } + + ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead") + void PrefetchT1(const void* addr); + + ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead") + void PrefetchT2(const void* addr); + + // Implementation details follow. + +#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + +#define ABSL_INTERNAL_HAVE_PREFETCH 1 + + // See __builtin_prefetch: + // https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. + // + // These functions speculatively load for read only. This is + // safe for all currently supported platforms. However, prefetch for + // store may have problems depending on the target platform. + // + inline void PrefetchT1(const void* addr) + { + // Note: this uses prefetcht1 on Intel. + __builtin_prefetch(addr, 0, 2); + } + inline void PrefetchT2(const void* addr) + { + // Note: this uses prefetcht2 on Intel. 
+        __builtin_prefetch(addr, 0, 1);
+    }
+
+#elif defined(ABSL_INTERNAL_HAVE_SSE)
+
+#define ABSL_INTERNAL_HAVE_PREFETCH 1
+
+        inline void PrefetchT1(const void* addr)
+        {
+            _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
+        }
+        inline void PrefetchT2(const void* addr)
+        {
+            _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
+        }
+
+#else
+        inline void PrefetchT1(const void*)
+        {
+        }
+        inline void PrefetchT2(const void*)
+        {
+        }
+#endif
+
+    } // namespace base_internal
+    ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_PREFETCH_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/pretty_function.h b/CAPI/cpp/grpc/include/absl/base/internal/pretty_function.h
new file mode 100644
index 00000000..35d51676
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function. That
+// is, "main", not "int main()". Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER) +#define ABSL_PRETTY_FUNCTION __FUNCSIG__ +#elif defined(__GNUC__) +#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__ +#else +#error "Unsupported compiler" +#endif + +#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h b/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h new file mode 100644 index 00000000..9ad78d7a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h @@ -0,0 +1,216 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Thread-safe logging routines that do not allocate any memory or +// acquire any locks, and can therefore be used by low-level memory +// allocation, synchronization, and signal-handling code. 
+ +#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_ +#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_ + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/atomic_hook.h" +#include "absl/base/log_severity.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +// This is similar to LOG(severity) << format..., but +// * it is to be used ONLY by low-level modules that can't use normal LOG() +// * it is designed to be a low-level logger that does not allocate any +// memory and does not need any locks, hence: +// * it logs straight and ONLY to STDERR w/o buffering +// * it uses an explicit printf-format and arguments list +// * it will silently chop off really long message strings +// Usage example: +// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error); +// This will print an almost standard log line like this to stderr only: +// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file + +#define ABSL_RAW_LOG(severity, ...) \ + do \ + { \ + constexpr const char* absl_raw_log_internal_basename = \ + ::absl::raw_log_internal::Basename(__FILE__, sizeof(__FILE__) - 1); \ + ::absl::raw_log_internal::RawLog(ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_basename, __LINE__, __VA_ARGS__); \ + ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \ + } while (0) + +// Similar to CHECK(condition) << message, but for low-level modules: +// we use only ABSL_RAW_LOG that does not allocate memory. +// We do not want to provide args list here to encourage this usage: +// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args); +// so that the args are not computed when not needed. 
+#define ABSL_RAW_CHECK(condition, message) \ + do \ + { \ + if (ABSL_PREDICT_FALSE(!(condition))) \ + { \ + ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ + } \ + } while (0) + +// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above, +// except that if the richer log library is linked into the binary, we dispatch +// to that instead. This is potentially useful for internal logging and +// assertions, where we are using RAW_LOG neither for its async-signal-safety +// nor for its non-allocating nature, but rather because raw logging has very +// few other dependencies. +// +// The API is a subset of the above: each macro only takes two arguments. Use +// StrCat if you need to build a richer message. +#define ABSL_INTERNAL_LOG(severity, message) \ + do \ + { \ + constexpr const char* absl_raw_log_internal_filename = __FILE__; \ + ::absl::raw_log_internal::internal_log_function( \ + ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_filename, __LINE__, message \ + ); \ + ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \ + } while (0) + +#define ABSL_INTERNAL_CHECK(condition, message) \ + do \ + { \ + if (ABSL_PREDICT_FALSE(!(condition))) \ + { \ + std::string death_message = "Check " #condition " failed: "; \ + death_message += std::string(message); \ + ABSL_INTERNAL_LOG(FATAL, death_message); \ + } \ + } while (0) + +#ifndef NDEBUG + +#define ABSL_RAW_DLOG(severity, ...) ABSL_RAW_LOG(severity, __VA_ARGS__) +#define ABSL_RAW_DCHECK(condition, message) ABSL_RAW_CHECK(condition, message) + +#else // NDEBUG + +#define ABSL_RAW_DLOG(severity, ...) 
\ + while (false) \ + ABSL_RAW_LOG(severity, __VA_ARGS__) +#define ABSL_RAW_DCHECK(condition, message) \ + while (false) \ + ABSL_RAW_CHECK(condition, message) + +#endif // NDEBUG + +#define ABSL_RAW_LOG_INTERNAL_INFO ::absl::LogSeverity::kInfo +#define ABSL_RAW_LOG_INTERNAL_WARNING ::absl::LogSeverity::kWarning +#define ABSL_RAW_LOG_INTERNAL_ERROR ::absl::LogSeverity::kError +#define ABSL_RAW_LOG_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \ + ::absl::NormalizeLogSeverity(severity) + +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_INFO +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_WARNING +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_ERROR +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_FATAL ABSL_UNREACHABLE() +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_LEVEL(severity) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace raw_log_internal + { + + // Helper function to implement ABSL_RAW_LOG + // Logs format... at "severity" level, reporting it + // as called from file:line. + // This does not allocate memory or acquire locks. + void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); + + // Writes the provided buffer directly to stderr, in a signal-safe, low-level + // manner. Preserves errno. + void AsyncSignalSafeWriteError(const char* s, size_t len); + + // compile-time function to get the "base" filename, that is, the part of + // a filename after the last "/" or "\" path separator. The search starts at + // the end of the string; the second parameter is the length of the string. + constexpr const char* Basename(const char* fname, int offset) + { + return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' ? fname + offset : Basename(fname, offset - 1); + } + + // For testing only. + // Returns true if raw logging is fully supported. 
When it is not + // fully supported, no messages will be emitted, but a log at FATAL + // severity will cause an abort. + // + // TODO(gfalcon): Come up with a better name for this method. + bool RawLoggingFullySupported(); + + // Function type for a raw_log customization hook for suppressing messages + // by severity, and for writing custom prefixes on non-suppressed messages. + // + // The installed hook is called for every raw log invocation. The message will + // be logged to stderr only if the hook returns true. FATAL errors will cause + // the process to abort, even if writing to stderr is suppressed. The hook is + // also provided with an output buffer, where it can write a custom log message + // prefix. + // + // The raw_log system does not allocate memory or grab locks. User-provided + // hooks must avoid these operations, and must not throw exceptions. + // + // 'severity' is the severity level of the message being written. + // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro + // was located. + // 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the + // hook writes a prefix, it must increment *buf and decrement *buf_size + // accordingly. + using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, int line, char** buf, int* buf_size); + + // Function type for a raw_log customization hook called to abort a process + // when a FATAL message is logged. If the provided AbortHook() returns, the + // logging system will call abort(). + // + // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro + // was located. + // The NUL-terminated logged message lives in the buffer between 'buf_start' + // and 'buf_end'. 'prefix_end' points to the first non-prefix character of the + // buffer (as written by the LogFilterAndPrefixHook.) + // + // The lifetime of the filename and message buffers will not end while the + // process remains alive. 
+ using AbortHook = void (*)(const char* file, int line, const char* buf_start, const char* prefix_end, const char* buf_end); + + // Internal logging function for ABSL_INTERNAL_LOG to dispatch to. + // + // TODO(gfalcon): When string_view no longer depends on base, change this + // interface to take its message as a string_view instead. + using InternalLogFunction = void (*)(absl::LogSeverity severity, const char* file, int line, const std::string& message); + + ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< + InternalLogFunction> + internal_log_function; + + // Registers hooks of the above types. Only a single hook of each type may be + // registered. It is an error to call these functions multiple times with + // different input arguments. + // + // These functions are safe to call at any point during initialization; they do + // not block or malloc, and are async-signal safe. + void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func); + void RegisterAbortHook(AbortHook func); + void RegisterInternalLogFunction(InternalLogFunction func); + + } // namespace raw_log_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h b/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h new file mode 100644 index 00000000..15091e93 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h @@ -0,0 +1,61 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Core interfaces and definitions used by by low-level interfaces such as +// SpinLock. + +#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ +#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Used to describe how a thread may be scheduled. Typically associated with + // the declaration of a resource supporting synchronized access. + // + // SCHEDULE_COOPERATIVE_AND_KERNEL: + // Specifies that when waiting, a cooperative thread (e.g. a Fiber) may + // reschedule (using base::scheduling semantics); allowing other cooperative + // threads to proceed. + // + // SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative") + // Specifies that no cooperative scheduling semantics may be used, even if the + // current thread is itself cooperatively scheduled. This means that + // cooperative threads will NOT allow other cooperative threads to execute in + // their place while waiting for a resource of this type. Host operating system + // semantics (e.g. a futex) may still be used. + // + // When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL + // by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which + // base::scheduling (e.g. the implementation of a Scheduler) may depend. + // + // NOTE: Cooperative resources may not be nested below non-cooperative ones. + // This means that it is invalid to to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL + // resource if a SCHEDULE_KERNEL_ONLY resource is already held. + enum SchedulingMode + { + SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS. + SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling. 
+ }; + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h b/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h new file mode 100644 index 00000000..8471b595 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h @@ -0,0 +1,48 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ +#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + class ScopedSetEnv + { + public: + ScopedSetEnv(const char* var_name, const char* new_value); + ~ScopedSetEnv(); + + private: + std::string var_name_; + std::string old_value_; + + // True if the environment variable was initially not set. + bool was_unset_; + }; + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h b/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h new file mode 100644 index 00000000..d46788b3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h @@ -0,0 +1,282 @@ +// +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Most users requiring mutual exclusion should use Mutex. +// SpinLock is provided for use in two situations: +// - for use by Abseil internal code that Mutex itself depends on +// - for async signal safety (see below) + +// SpinLock is async signal safe. If a spinlock is used within a signal +// handler, all code that acquires the lock must ensure that the signal cannot +// arrive while they are holding the lock. Typically, this is done by blocking +// the signal. +// +// Threads waiting on a SpinLock may be woken in an arbitrary order. + +#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ +#define ABSL_BASE_INTERNAL_SPINLOCK_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/const_init.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/tsan_mutex_interface.h" +#include "absl/base/thread_annotations.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + class ABSL_LOCKABLE SpinLock + { + public: + SpinLock() : + lockword_(kSpinLockCooperative) + { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); + } + + // Constructors that allow non-cooperative spinlocks to be created for use + // inside thread schedulers. Normal clients should not use these. 
+ explicit SpinLock(base_internal::SchedulingMode mode); + + // Constructor for global SpinLock instances. See absl/base/const_init.h. + constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) : + lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) + { + } + + // For global SpinLock instances prefer trivial destructor when possible. + // Default but non-trivial destructor in some build configurations causes an + // extra static initializer. +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE + ~SpinLock() + { + ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); + } +#else + ~SpinLock() = default; +#endif + + // Acquire this SpinLock. + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() + { + ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); + if (!TryLockImpl()) + { + SlowLock(); + } + ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); + } + + // Try to acquire this SpinLock without blocking and return true if the + // acquisition was successful. If the lock was not acquired, false is + // returned. If this SpinLock is free at the time of the call, TryLock + // will return true with high probability. + inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) + { + ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); + bool res = TryLockImpl(); + ABSL_TSAN_MUTEX_POST_LOCK( + this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), 0 + ); + return res; + } + + // Release this SpinLock, which must be held by the calling thread. + inline void Unlock() ABSL_UNLOCK_FUNCTION() + { + ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, std::memory_order_release); + + if ((lock_value & kSpinLockDisabledScheduling) != 0) + { + base_internal::SchedulingGuard::EnableRescheduling(true); + } + if ((lock_value & kWaitTimeMask) != 0) + { + // Collect contentionz profile info, and speed the wakeup of any waiter. 
+ // The wait_cycles value indicates how long this thread spent waiting + // for the lock. + SlowUnlock(lock_value); + } + ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); + } + + // Determine if the lock is held. When the lock is held by the invoking + // thread, true will always be returned. Intended to be used as + // CHECK(lock.IsHeld()). + inline bool IsHeld() const + { + return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; + } + + // Return immediately if this thread holds the SpinLock exclusively. + // Otherwise, report an error by crashing with a diagnostic. + inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() + { + if (!IsHeld()) + { + ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); + } + } + + protected: + // These should not be exported except for testing. + + // Store number of cycles between wait_start_time and wait_end_time in a + // lock value. + static uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time); + + // Extract number of wait cycles in a lock value. + static int64_t DecodeWaitCycles(uint32_t lock_value); + + // Provide access to protected method above. Use for testing only. + friend struct SpinLockTest; + + private: + // lockword_ is used to store the following: + // + // bit[0] encodes whether a lock is being held. + // bit[1] encodes whether a lock uses cooperative scheduling. + // bit[2] encodes whether the current lock holder disabled scheduling when + // acquiring the lock. Only set when kSpinLockHeld is also set. + // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. + // This is set by the lock holder to indicate how long it waited on + // the lock before eventually acquiring it. The number of cycles is + // encoded as a 29-bit unsigned int, or in the case that the current + // holder did not wait but another waiter is queued, the LSB + // (kSpinLockSleeper) is set. The implementation does not explicitly + // track the number of queued waiters beyond this. 
It must always be + // assumed that waiters may exist if the current holder was required to + // queue. + // + // Invariant: if the lock is not held, the value is either 0 or + // kSpinLockCooperative. + static constexpr uint32_t kSpinLockHeld = 1; + static constexpr uint32_t kSpinLockCooperative = 2; + static constexpr uint32_t kSpinLockDisabledScheduling = 4; + static constexpr uint32_t kSpinLockSleeper = 8; + // Includes kSpinLockSleeper. + static constexpr uint32_t kWaitTimeMask = + ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling); + + // Returns true if the provided scheduling mode is cooperative. + static constexpr bool IsCooperative( + base_internal::SchedulingMode scheduling_mode + ) + { + return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } + + uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); + void SlowLock() ABSL_ATTRIBUTE_COLD; + void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; + uint32_t SpinLoop(); + + inline bool TryLockImpl() + { + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0; + } + + std::atomic lockword_; + + SpinLock(const SpinLock&) = delete; + SpinLock& operator=(const SpinLock&) = delete; + }; + + // Corresponding locker object that arranges to acquire a spinlock for + // the duration of a C++ scope. + class ABSL_SCOPED_LOCKABLE SpinLockHolder + { + public: + inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) : + lock_(l) + { + l->Lock(); + } + inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() + { + lock_->Unlock(); + } + + SpinLockHolder(const SpinLockHolder&) = delete; + SpinLockHolder& operator=(const SpinLockHolder&) = delete; + + private: + SpinLock* lock_; + }; + + // Register a hook for profiling support. + // + // The function pointer registered here will be called whenever a spinlock is + // contended. 
The callback is given an opaque handle to the contended spinlock + // and the number of wait cycles. This is thread-safe, but only a single + // profiler can be registered. It is an error to call this function multiple + // times with different arguments. + void RegisterSpinLockProfiler(void (*fn)(const void* lock, int64_t wait_cycles)); + + //------------------------------------------------------------------------------ + // Public interface ends here. + //------------------------------------------------------------------------------ + + // If (result & kSpinLockHeld) == 0, then *this was successfully locked. + // Otherwise, returns last observed value for lockword_. + inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, uint32_t wait_cycles) + { + if ((lock_value & kSpinLockHeld) != 0) + { + return lock_value; + } + + uint32_t sched_disabled_bit = 0; + if ((lock_value & kSpinLockCooperative) == 0) + { + // For non-cooperative locks we must make sure we mark ourselves as + // non-reschedulable before we attempt to CompareAndSwap. + if (base_internal::SchedulingGuard::DisableRescheduling()) + { + sched_disabled_bit = kSpinLockDisabledScheduling; + } + } + + if (!lockword_.compare_exchange_strong( + lock_value, + kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit, + std::memory_order_acquire, + std::memory_order_relaxed + )) + { + base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0); + } + + return lock_value; + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_akaros.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_akaros.inc new file mode 100644 index 00000000..7b0cada4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_akaros.inc @@ -0,0 +1,35 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is an Akaros-specific part of spinlock_wait.cc + +#include + +#include "absl/base/internal/scheduling_mode.h" + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic* /* lock_word */, uint32_t /* value */, + int /* loop */, absl::base_internal::SchedulingMode /* mode */) { + // In Akaros, one must take care not to call anything that could cause a + // malloc(), a blocking system call, or a uthread_yield() while holding a + // spinlock. Our callers assume will not call into libraries or other + // arbitrary code. +} + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic* /* lock_word */, bool /* all */) {} + +} // extern "C" diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_linux.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_linux.inc new file mode 100644 index 00000000..fe8ba674 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_linux.inc @@ -0,0 +1,71 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Linux-specific part of spinlock_wait.cc + +#include +#include +#include + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/internal/errno_saver.h" + +// The SpinLock lockword is `std::atomic`. Here we assert that +// `std::atomic` is bitwise equivalent of the `int` expected +// by SYS_futex. We also assume that reads/writes done to the lockword +// by SYS_futex have rational semantics with regard to the +// std::atomic<> API. C++ provides no guarantees of these assumptions, +// but they are believed to hold in practice. +static_assert(sizeof(std::atomic) == sizeof(int), + "SpinLock lockword has the wrong size for a futex"); + +// Some Android headers are missing these definitions even though they +// support these futex operations. 
+#ifdef __BIONIC__ +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +#ifndef FUTEX_PRIVATE_FLAG +#define FUTEX_PRIVATE_FLAG 128 +#endif +#endif + +#if defined(__NR_futex_time64) && !defined(SYS_futex_time64) +#define SYS_futex_time64 __NR_futex_time64 +#endif + +#if defined(SYS_futex_time64) && !defined(SYS_futex) +#define SYS_futex SYS_futex_time64 +#endif + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic *w, uint32_t value, int, + absl::base_internal::SchedulingMode) { + absl::base_internal::ErrnoSaver errno_saver; + syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr); +} + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic *w, bool all) { + syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0); +} + +} // extern "C" diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_posix.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_posix.inc new file mode 100644 index 00000000..4f6f887d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_posix.inc @@ -0,0 +1,46 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// This file is a Posix-specific part of spinlock_wait.cc + +#include + +#include +#include + +#include "absl/base/internal/errno_saver.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/port.h" + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic* /* lock_word */, uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + absl::base_internal::ErrnoSaver errno_saver; + if (loop == 0) { + } else if (loop == 1) { + sched_yield(); + } else { + struct timespec tm; + tm.tv_sec = 0; + tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); + nanosleep(&tm, nullptr); + } +} + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic* /* lock_word */, bool /* all */) {} + +} // extern "C" diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h new file mode 100644 index 00000000..a36b6e16 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h @@ -0,0 +1,97 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ +#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ + +// Operations to make atomic transitions on a word, and to allow +// waiting for those transitions to become possible. 
+
+#include <stdint.h>
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl
+{
+    ABSL_NAMESPACE_BEGIN
+    namespace base_internal
+    {
+
+        // SpinLockWait() waits until it can perform one of several transitions from
+        // "from" to "to". It returns when it performs a transition where done==true.
+        struct SpinLockWaitTransition
+        {
+            uint32_t from;
+            uint32_t to;
+            bool done;
+        };
+
+        // Wait until *w can transition from trans[i].from to trans[i].to for some i
+        // satisfying 0<=i<n && trans[i].done, atomically make the transition,
+        // then return the old value of *w. Make any other atomic transitions
+        // where !trans[i].done, but continue waiting.
+        //
+        // Wakeups for threads blocked on SpinLockWait do not respect priorities.
+        uint32_t SpinLockWait(std::atomic<uint32_t>* w, int n, const SpinLockWaitTransition trans[], SchedulingMode scheduling_mode);
+
+        // If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+        // is true, wake all such threads. On some systems, this may be a no-op; on
+        // those systems, threads calling SpinLockDelay() will always wake eventually
+        // even if SpinLockWake() is never called.
+        void SpinLockWake(std::atomic<uint32_t>* w, bool all);
+
+        // Wait for an appropriate spin delay on iteration "loop" of a
+        // spin loop on location *w, whose previously observed value was "value".
+        // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+        // or may wait for a call to SpinLockWake(w).
+        void SpinLockDelay(std::atomic<uint32_t>* w, uint32_t value, int loop, base_internal::SchedulingMode scheduling_mode);
+
+        // Helper used by AbslInternalSpinLockDelay.
+        // Returns a suggested delay in nanoseconds for iteration number "loop".
+        int SpinLockSuggestedDelayNS(int loop);
+
+    }  // namespace base_internal
+    ABSL_NAMESPACE_END
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C"
+{
+    void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t>* w, bool all);
+    void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+        std::atomic<uint32_t>* w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode
+    );
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t>* w, bool all)
+{
+    ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)
+    (w, all);
+}
+
+inline void absl::base_internal::SpinLockDelay(
+    std::atomic<uint32_t>* w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode
+)
+{
+    ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+    (w, value, loop, scheduling_mode);
+}
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_win32.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_win32.inc
new file mode 100644
index 00000000..934c2016
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_win32.inc
@@ -0,0 +1,40 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  if (loop == 0) {
+  } else if (loop == 1) {
+    Sleep(0);
+  } else {
+    // SpinLockSuggestedDelayNS() always returns a positive integer, so this
+    // static_cast is safe.
+    Sleep(static_cast<DWORD>(
+        absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000));
+  }
+}
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/strerror.h b/CAPI/cpp/grpc/include/absl/base/internal/strerror.h
new file mode 100644
index 00000000..5f794573
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/strerror.h
@@ -0,0 +1,41 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl
+{
+    ABSL_NAMESPACE_BEGIN
+    namespace base_internal
+    {
+
+        // A portable and thread-safe alternative to C89's `strerror`.
+ // + // The C89 specification of `strerror` is not suitable for use in a + // multi-threaded application as the returned string may be changed by calls to + // `strerror` from another thread. The many non-stdlib alternatives differ + // enough in their names, availability, and semantics to justify this wrapper + // around them. `errno` will not be modified by a call to `absl::StrError`. + std::string StrError(int errnum); + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_STRERROR_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h b/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h new file mode 100644 index 00000000..ee27ec73 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h @@ -0,0 +1,76 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file includes routines to find out characteristics +// of the machine a program is running on. It is undoubtedly +// system-dependent. + +// Functions listed here that accept a pid_t as an argument act on the +// current process if the pid_t argument is 0 +// All functions here are thread-hostile due to file caching unless +// commented otherwise. 
+ +#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_ +#define ABSL_BASE_INTERNAL_SYSINFO_H_ + +#ifndef _WIN32 +#include +#endif + +#include + +#include "absl/base/config.h" +#include "absl/base/port.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Nominal core processor cycles per second of each processor. This is _not_ + // necessarily the frequency of the CycleClock counter (see cycleclock.h) + // Thread-safe. + double NominalCPUFrequency(); + + // Number of logical processors (hyperthreads) in system. Thread-safe. + int NumCPUs(); + +// Return the thread id of the current thread, as told by the system. +// No two currently-live threads implemented by the OS shall have the same ID. +// Thread ids of exited threads may be reused. Multiple user-level threads +// may have the same thread ID if multiplexed on the same OS thread. +// +// On Linux, you may send a signal to the resulting ID with kill(). However, +// it is recommended for portability that you use pthread_kill() instead. +#ifdef _WIN32 + // On Windows, process id and thread id are of the same type according to the + // return types of GetProcessId() and GetThreadId() are both DWORD, an unsigned + // 32-bit type. + using pid_t = uint32_t; +#endif + pid_t GetTID(); + + // Like GetTID(), but caches the result in thread-local storage in order + // to avoid unnecessary system calls. Note that there are some cases where + // one must call through to GetTID directly, which is why this exists as a + // separate function. For example, GetCachedTID() is not safe to call in + // an asynchronous signal-handling context nor right after a call to fork(). 
+ pid_t GetCachedTID(); + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SYSINFO_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h b/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h new file mode 100644 index 00000000..6327fcf5 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h @@ -0,0 +1,282 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: thread_annotations.h +// ----------------------------------------------------------------------------- +// +// WARNING: This is a backwards compatible header and it will be removed after +// the migration to prefixed thread annotations is finished; please include +// "absl/base/thread_annotations.h". +// +// This header file contains macro definitions for thread safety annotations +// that allow developers to document the locking policies of multi-threaded +// code. The annotations can also help program analysis tools to identify +// potential thread safety issues. +// +// These annotations are implemented using compiler attributes. Using the macros +// defined here instead of raw attributes allow for portability and future +// compatibility. 
+// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. + +#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ + +// ABSL_LEGACY_THREAD_ANNOTATIONS is a *temporary* compatibility macro that can +// be defined on the compile command-line to restore the legacy spellings of the +// thread annotations macros/functions. The macros in this file are available +// under ABSL_ prefixed spellings in absl/base/thread_annotations.h. This macro +// and the legacy spellings will be removed in the future. +#ifdef ABSL_LEGACY_THREAD_ANNOTATIONS + +#if defined(__clang__) +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif + +// GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ GUARDED_BY(mu_); +// ... +// }; +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) + +// PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ PT_GUARDED_BY(mu_); +// ... 
+// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_); +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) + +// ACQUIRED_AFTER() / ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER +// and ACQUIRED_BEFORE.) +// +// As with GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ACQUIRED_AFTER(m1_); +#define ACQUIRED_AFTER(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define ACQUIRED_BEFORE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a GUARDED_BY(mu1); +// int b GUARDED_BY(mu2); +// +// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... 
} +#define EXCLUSIVE_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) + +#define SHARED_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) + +// LOCKS_EXCLUDED() +// +// Documents the locks acquired in the body of the function. These locks +// cannot be held when calling this function (as Abseil's `Mutex` locks are +// non-reentrant). +#define LOCKS_EXCLUDED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +// LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with LOCK_RETURNED. +#define LOCK_RETURNED(x) \ + THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +// LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). +#define LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(lockable) + +// SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#define SCOPED_LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + +// EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#define EXCLUSIVE_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) + +// SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#define SHARED_LOCK_FUNCTION(...) 
\ + THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) + +// UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#define UNLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) + +// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) + +#define SHARED_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) + +// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#define ASSERT_EXCLUSIVE_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) + +#define ASSERT_SHARED_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) + +// NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. +// This annotation is used to mark functions that are known to be correct, but +// the locking behavior is more complicated than the analyzer can handle. 
+#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +//------------------------------------------------------------------------------ +// Tool-Supplied Annotations +//------------------------------------------------------------------------------ + +// TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. +#define TS_UNCHECKED(x) "" + +// TS_FIXME is used to mark lock expressions that are not valid C++ syntax. +// It is used by automated tools to mark and disable invalid expressions. +// The annotation should either be fixed, or changed to TS_UNCHECKED. +#define TS_FIXME(x) "" + +// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of +// a particular function. However, this attribute is used to mark functions +// that are incorrect and need to be fixed. It is used by automated tools to +// avoid breaking the build when the analysis is updated. +// Code owners are expected to eventually fix the routine. +#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS + +// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY +// annotation that needs to be fixed, because it is producing thread safety +// warning. It disables the GUARDED_BY. +#define GUARDED_BY_FIXME(x) + +// Disables warnings for a single read operation. This can be used to avoid +// warnings when it is known that the read is not actually involved in a race, +// but the compiler cannot confirm that. +#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x) + +namespace thread_safety_analysis +{ + + // Takes a reference to a guarded data member, and returns an unguarded + // reference. 
+ template + inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS + { + return v; + } + + template + inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS + { + return v; + } + +} // namespace thread_safety_analysis + +#endif // defined(ABSL_LEGACY_THREAD_ANNOTATIONS) + +#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h b/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h new file mode 100644 index 00000000..9d57505a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h @@ -0,0 +1,279 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Each active thread has an ThreadIdentity that may represent the thread in +// various level interfaces. ThreadIdentity objects are never deallocated. +// When a thread terminates, its ThreadIdentity object may be reused for a +// thread created later. + +#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ +#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ + +#ifndef _WIN32 +#include +// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when +// supported. 
+#include +#endif + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/per_thread_tls.h" +#include "absl/base/optimization.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + struct SynchLocksHeld; + struct SynchWaitParams; + + namespace base_internal + { + + class SpinLock; + struct ThreadIdentity; + + // Used by the implementation of absl::Mutex and absl::CondVar. + struct PerThreadSynch + { + // The internal representation of absl::Mutex and absl::CondVar rely + // on the alignment of PerThreadSynch. Both store the address of the + // PerThreadSynch in the high-order bits of their internal state, + // which means the low kLowZeroBits of the address of PerThreadSynch + // must be zero. + static constexpr int kLowZeroBits = 8; + static constexpr int kAlignment = 1 << kLowZeroBits; + + // Returns the associated ThreadIdentity. + // This can be implemented as a cast because we guarantee + // PerThreadSynch is the first element of ThreadIdentity. + ThreadIdentity* thread_identity() + { + return reinterpret_cast(this); + } + + PerThreadSynch* next; // Circular waiter queue; initialized to 0. + PerThreadSynch* skip; // If non-zero, all entries in Mutex queue + // up to and including "skip" have same + // condition as this, and will be woken later + bool may_skip; // if false while on mutex queue, a mutex unlocker + // is using this PerThreadSynch as a terminator. Its + // skip field must not be filled in because the loop + // might then skip over the terminator. + bool wake; // This thread is to be woken from a Mutex. + // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the + // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. + // + // The value of "x->cond_waiter" is meaningless if "x" is not on a + // Mutex waiter list. + bool cond_waiter; + bool maybe_unlocking; // Valid at head of Mutex waiter queue; + // true if UnlockSlow could be searching + // for a waiter to wake. 
Used for an optimization + // in Enqueue(). true is always a valid value. + // Can be reset to false when the unlocker or any + // writer releases the lock, or a reader fully + // releases the lock. It may not be set to false + // by a reader that decrements the count to + // non-zero. protected by mutex spinlock + bool suppress_fatal_errors; // If true, try to proceed even in the face + // of broken invariants. This is used within + // fatal signal handlers to improve the + // chances of debug logging information being + // output successfully. + int priority; // Priority of thread (updated every so often). + + // State values: + // kAvailable: This PerThreadSynch is available. + // kQueued: This PerThreadSynch is unavailable, it's currently queued on a + // Mutex or CondVar waistlist. + // + // Transitions from kQueued to kAvailable require a release + // barrier. This is needed as a waiter may use "state" to + // independently observe that it's no longer queued. + // + // Transitions from kAvailable to kQueued require no barrier, they + // are externally ordered by the Mutex. + enum State + { + kAvailable, + kQueued + }; + std::atomic state; + + // The wait parameters of the current wait. waitp is null if the + // thread is not waiting. Transitions from null to non-null must + // occur before the enqueue commit point (state = kQueued in + // Enqueue() and CondVarEnqueue()). Transitions from non-null to + // null must occur after the wait is finished (state = kAvailable in + // Mutex::Block() and CondVar::WaitCommon()). This field may be + // changed only by the thread that describes this PerThreadSynch. A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams* waitp; + + intptr_t readers; // Number of readers in mutex. + + // When priority will next be read (cycles). 
+ int64_t next_priority_read_cycles; + + // Locks held; used during deadlock detection. + // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). + SynchLocksHeld* all_locks; + }; + + // The instances of this class are allocated in NewThreadIdentity() with an + // alignment of PerThreadSynch::kAlignment. + // + // NOTE: The layout of fields in this structure is critical, please do not + // add, remove, or modify the field placements without fully auditing the + // layout. + struct ThreadIdentity + { + // Must be the first member. The Mutex implementation requires that + // the PerThreadSynch object associated with each thread is + // PerThreadSynch::kAlignment aligned. We provide this alignment on + // ThreadIdentity itself. + PerThreadSynch per_thread_synch; + + // Private: Reserved for absl::synchronization_internal::Waiter. + struct WaiterState + { + alignas(void*) char data[256]; + } waiter_state; + + // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). + std::atomic* blocked_count_ptr; + + // The following variables are mostly read/written just by the + // thread itself. The only exception is that these are read by + // a ticker thread as a hint. + std::atomic ticker; // Tick counter, incremented once per second. + std::atomic wait_start; // Ticker value when thread started waiting. + std::atomic is_idle; // Has thread become idle yet? + + ThreadIdentity* next; + }; + + // Returns the ThreadIdentity object representing the calling thread; guaranteed + // to be unique for its lifetime. The returned object will remain valid for the + // program's lifetime; although it may be re-assigned to a subsequent thread. + // If one does not exist, return nullptr instead. + // + // Does not malloc(*), and is async-signal safe. + // [*] Technically pthread_setspecific() does malloc on first use; however this + // is handled internally within tcmalloc's initialization already. 
Note that + // darwin does *not* use tcmalloc, so this can catch you if using MallocHooks + // on Apple platforms. Whatever function is calling your MallocHooks will need + // to watch for recursion on Apple platforms. + // + // New ThreadIdentity objects can be constructed and associated with a thread + // by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h. + ThreadIdentity* CurrentThreadIdentityIfPresent(); + + using ThreadIdentityReclaimerFunction = void (*)(void*); + + // Sets the current thread identity to the given value. 'reclaimer' is a + // pointer to the global function for cleaning up instances on thread + // destruction. + void SetCurrentThreadIdentity(ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer); + + // Removes the currently associated ThreadIdentity from the running thread. + // This must be called from inside the ThreadIdentityReclaimerFunction, and only + // from that function. + void ClearCurrentThreadIdentity(); + +// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE= +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS +#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE +#error ABSL_THREAD_IDENTITY_MODE cannot be directly set +#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE) +#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE +#elif defined(_WIN32) && !defined(__MINGW32__) +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL) 
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ + (__GOOGLE_GRTE_VERSION__ >= 20140228L) +// Support for async-safe TLS was specifically added in GRTEv4. It's not +// present in the upstream eglibc. +// Note: Current default for production systems. +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS +#else +#define ABSL_THREAD_IDENTITY_MODE \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#endif + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + +#if ABSL_PER_THREAD_TLS + ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* + thread_identity_ptr; +#elif defined(ABSL_HAVE_THREAD_LOCAL) + ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; +#else +#error Thread-local storage not detected on this platform +#endif + +// thread_local variables cannot be in headers exposed by DLLs or in certain +// build configurations on Apple platforms. However, it is important for +// performance reasons in general that `CurrentThreadIdentityIfPresent` be +// inlined. In the other cases we opt to have the function not be inlined. Note +// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude +// this entire inline definition. 
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \ + !defined(ABSL_CONSUME_DLL) +#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1 +#endif + +#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT + inline ThreadIdentity* CurrentThreadIdentityIfPresent() + { + return thread_identity_ptr; + } +#endif + +#elif ABSL_THREAD_IDENTITY_MODE != \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error Unknown ABSL_THREAD_IDENTITY_MODE +#endif + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h b/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h new file mode 100644 index 00000000..379d81db --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h @@ -0,0 +1,77 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ +#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Helper functions that allow throwing exceptions consistently from anywhere. + // The main use case is for header-based libraries (eg templates), as they will + // be built by many different targets with their own compiler options. 
+ // In particular, this will allow a safe way to throw exceptions even if the + // caller is compiled with -fno-exceptions. This is intended for implementing + // things like map<>::at(), which the standard documents as throwing an + // exception on error. + // + // Using other techniques like #if tricks could lead to ODR violations. + // + // You shouldn't use it unless you're writing code that you know will be built + // both with and without exceptions and you need to conform to an interface + // that uses exceptions. + + [[noreturn]] void ThrowStdLogicError(const std::string& what_arg); + [[noreturn]] void ThrowStdLogicError(const char* what_arg); + [[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg); + [[noreturn]] void ThrowStdInvalidArgument(const char* what_arg); + [[noreturn]] void ThrowStdDomainError(const std::string& what_arg); + [[noreturn]] void ThrowStdDomainError(const char* what_arg); + [[noreturn]] void ThrowStdLengthError(const std::string& what_arg); + [[noreturn]] void ThrowStdLengthError(const char* what_arg); + [[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg); + [[noreturn]] void ThrowStdOutOfRange(const char* what_arg); + [[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg); + [[noreturn]] void ThrowStdRuntimeError(const char* what_arg); + [[noreturn]] void ThrowStdRangeError(const std::string& what_arg); + [[noreturn]] void ThrowStdRangeError(const char* what_arg); + [[noreturn]] void ThrowStdOverflowError(const std::string& what_arg); + [[noreturn]] void ThrowStdOverflowError(const char* what_arg); + [[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg); + [[noreturn]] void ThrowStdUnderflowError(const char* what_arg); + + [[noreturn]] void ThrowStdBadFunctionCall(); + [[noreturn]] void ThrowStdBadAlloc(); + + // ThrowStdBadArrayNewLength() cannot be consistently supported because + // std::bad_array_new_length is missing in libstdc++ until 4.9.0. 
+ // https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html + // https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html + // libcxx (as of 3.2) and msvc (as of 2015) both have it. + // [[noreturn]] void ThrowStdBadArrayNewLength(); + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/tsan_mutex_interface.h b/CAPI/cpp/grpc/include/absl/base/internal/tsan_mutex_interface.h new file mode 100644 index 00000000..39207d8a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/tsan_mutex_interface.h @@ -0,0 +1,68 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is intended solely for spinlock.h. +// It provides ThreadSanitizer annotations for custom mutexes. +// See for meaning of these annotations. + +#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ +#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ + +#include "absl/base/config.h" + +// ABSL_INTERNAL_HAVE_TSAN_INTERFACE +// Macro intended only for internal use. +// +// Checks whether LLVM Thread Sanitizer interfaces are available. +// First made available in LLVM 5.0 (Sep 2017). +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE +#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set." 
+#endif + +#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include) +#if __has_include() +#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1 +#endif +#endif + +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE +#include + +#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create +#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy +#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock +#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock +#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock +#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock +#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal +#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal +#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert +#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert + +#else + +#define ABSL_TSAN_MUTEX_CREATE(...) +#define ABSL_TSAN_MUTEX_DESTROY(...) +#define ABSL_TSAN_MUTEX_PRE_LOCK(...) +#define ABSL_TSAN_MUTEX_POST_LOCK(...) +#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...) +#define ABSL_TSAN_MUTEX_POST_UNLOCK(...) +#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...) +#define ABSL_TSAN_MUTEX_POST_SIGNAL(...) +#define ABSL_TSAN_MUTEX_PRE_DIVERT(...) +#define ABSL_TSAN_MUTEX_POST_DIVERT(...) + +#endif + +#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h b/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h new file mode 100644 index 00000000..524e7c41 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h @@ -0,0 +1,96 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ +#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ + +#include + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// unaligned APIs + +// Portable handling of unaligned loads, stores, and copies. + +// The unaligned API is C++ only. The declarations use C++ features +// (namespaces, inline) which are absent or incompatible in C. +#if defined(__cplusplus) +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + inline uint16_t UnalignedLoad16(const void* p) + { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; + } + + inline uint32_t UnalignedLoad32(const void* p) + { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; + } + + inline uint64_t UnalignedLoad64(const void* p) + { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; + } + + inline void UnalignedStore16(void* p, uint16_t v) + { + memcpy(p, &v, sizeof v); + } + + inline void UnalignedStore32(void* p, uint32_t v) + { + memcpy(p, &v, sizeof v); + } + + inline void UnalignedStore64(void* p, uint64_t v) + { + memcpy(p, &v, sizeof v); + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) 
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#endif // defined(__cplusplus), end of unaligned API + +#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h new file mode 100644 index 00000000..08db5acc --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h @@ -0,0 +1,102 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// UnscaledCycleClock +// An UnscaledCycleClock yields the value and frequency of a cycle counter +// that increments at a rate that is approximately constant. +// This class is for internal use only, you should consider using CycleClock +// instead. +// +// Notes: +// The cycle counter frequency is not necessarily the core clock frequency. +// That is, CycleCounter cycles are not necessarily "CPU cycles". +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. 
If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ + +#include + +#if defined(__APPLE__) +#include +#endif + +#include "absl/base/config.h" +#include "absl/base/internal/unscaledcycleclock_config.h" + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + class UnscaledCycleClockWrapperForGetCurrentTime; + } // namespace time_internal + + namespace base_internal + { + class CycleClock; + class UnscaledCycleClockWrapperForInitializeFrequency; + + class UnscaledCycleClock + { + private: + UnscaledCycleClock() = delete; + + // Return the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // Return the how much UnscaledCycleClock::Now() increases per second. + // This is not necessarily the core CPU clock frequency. + // It may be the nominal value report by the kernel, rather than a measured + // value. 
+ static double Frequency(); + + // Allowed users + friend class base_internal::CycleClock; + friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; + friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; + }; + +#if defined(__x86_64__) + + inline int64_t UnscaledCycleClock::Now() + { + uint64_t low, high; + __asm__ volatile("rdtsc" + : "=a"(low), "=d"(high)); + return static_cast((high << 32) | low); + } + +#endif + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + +#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock_config.h b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock_config.h new file mode 100644 index 00000000..59756d7d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock_config.h @@ -0,0 +1,61 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ +#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ + +#if defined(__APPLE__) +#include +#endif + +// The following platforms have an implementation of a hardware counter. 
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \ + defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC)) +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 +#else +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 +#endif + +// The following platforms often disable access to the hardware +// counter (through a sandbox) even if the underlying hardware has a +// usable counter. The CycleTimer interface also requires a *scaled* +// CycleClock that runs at atleast 1 MHz. We've found some Android +// ARM64 devices where this is not the case, so we disable it by +// default on Android ARM64. +#if defined(__native_client__) || (defined(__APPLE__)) || \ + (defined(__ANDROID__) && defined(__aarch64__)) +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 +#else +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1 +#endif + +// UnscaledCycleClock is an optional internal feature. +// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. +// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 +#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) +#define ABSL_USE_UNSCALED_CYCLECLOCK \ + (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ + ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) +#endif + +#if ABSL_USE_UNSCALED_CYCLECLOCK +// This macro can be used to test if UnscaledCycleClock::Frequency() +// is NominalCPUFrequency() on a particular platform. 
+#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || defined(_M_IX86) || defined(_M_X64)) +#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +#endif +#endif + +#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/log_severity.h b/CAPI/cpp/grpc/include/absl/base/log_severity.h new file mode 100644 index 00000000..ba503a0b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/log_severity.h @@ -0,0 +1,177 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_LOG_SEVERITY_H_ +#define ABSL_BASE_LOG_SEVERITY_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::LogSeverity + // + // Four severity levels are defined. Logging APIs should terminate the program + // when a message is logged at severity `kFatal`; the other levels have no + // special semantics. + // + // Values other than the four defined levels (e.g. produced by `static_cast`) + // are valid, but their semantics when passed to a function, macro, or flag + // depend on the function, macro, or flag. The usual behavior is to normalize + // such values to a defined severity level, however in some cases values other + // than the defined levels are useful for comparison. 
+ // + // Example: + // + // // Effectively disables all logging: + // SetMinLogLevel(static_cast(100)); + // + // Abseil flags may be defined with type `LogSeverity`. Dependency layering + // constraints require that the `AbslParseFlag()` overload be declared and + // defined in the flags library itself rather than here. The `AbslUnparseFlag()` + // overload is defined there as well for consistency. + // + // absl::LogSeverity Flag String Representation + // + // An `absl::LogSeverity` has a string representation used for parsing + // command-line flags based on the enumerator name (e.g. `kFatal`) or + // its unprefixed name (without the `k`) in any case-insensitive form. (E.g. + // "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an + // unprefixed string representation in all caps (e.g. "FATAL") or an integer. + // + // Additionally, the parser accepts arbitrary integers (as if the type were + // `int`). + // + // Examples: + // + // --my_log_level=kInfo + // --my_log_level=INFO + // --my_log_level=info + // --my_log_level=0 + // + // Unparsing a flag produces the same result as `absl::LogSeverityName()` for + // the standard levels and a base-ten integer otherwise. + enum class LogSeverity : int + { + kInfo = 0, + kWarning = 1, + kError = 2, + kFatal = 3, + }; + + // LogSeverities() + // + // Returns an iterable of all standard `absl::LogSeverity` values, ordered from + // least to most severe. + constexpr std::array LogSeverities() + { + return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, absl::LogSeverity::kError, absl::LogSeverity::kFatal}}; + } + + // LogSeverityName() + // + // Returns the all-caps string representation (e.g. "INFO") of the specified + // severity level if it is one of the standard levels and "UNKNOWN" otherwise. + constexpr const char* LogSeverityName(absl::LogSeverity s) + { + return s == absl::LogSeverity::kInfo ? "INFO" : s == absl::LogSeverity::kWarning ? 
"WARNING" : + s == absl::LogSeverity::kError ? "ERROR" : + s == absl::LogSeverity::kFatal ? "FATAL" : + "UNKNOWN"; + } + + // NormalizeLogSeverity() + // + // Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal` + // normalize to `kError` (**NOT** `kFatal`). + constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) + { + return s < absl::LogSeverity::kInfo ? absl::LogSeverity::kInfo : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : + s; + } + constexpr absl::LogSeverity NormalizeLogSeverity(int s) + { + return absl::NormalizeLogSeverity(static_cast(s)); + } + + // operator<< + // + // The exact representation of a streamed `absl::LogSeverity` is deliberately + // unspecified; do not rely on it. + std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); + + // Enums representing a lower bound for LogSeverity. APIs that only operate on + // messages of at least a certain level (for example, `SetMinLogLevel()`) use + // this type to specify that level. absl::LogSeverityAtLeast::kInfinity is + // a level above all threshold levels and therefore no log message will + // ever meet this threshold. + enum class LogSeverityAtLeast : int + { + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), + kInfinity = 1000, + }; + + std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s); + + // Enums representing an upper bound for LogSeverity. APIs that only operate on + // messages of at most a certain level (for example, buffer all messages at or + // below a certain level) use this type to specify that level. + // absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold + // levels and therefore will exclude all log messages. 
+ enum class LogSeverityAtMost : int + { + kNegativeInfinity = -1000, + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), + }; + + std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s); + +#define COMPOP(op1, op2, T) \ + constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) \ + { \ + return static_cast(lhs) op1 rhs; \ + } \ + constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) \ + { \ + return lhs op2 static_cast(rhs); \ + } + + // Comparisons between `LogSeverity` and `LogSeverityAtLeast`/ + // `LogSeverityAtMost` are only supported in one direction. + // Valid checks are: + // LogSeverity >= LogSeverityAtLeast + // LogSeverity < LogSeverityAtLeast + // LogSeverity <= LogSeverityAtMost + // LogSeverity > LogSeverityAtMost + COMPOP(>, <, LogSeverityAtLeast) + COMPOP(<=, >=, LogSeverityAtLeast) + COMPOP(<, >, LogSeverityAtMost) + COMPOP(>=, <=, LogSeverityAtMost) +#undef COMPOP + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_LOG_SEVERITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/macros.h b/CAPI/cpp/grpc/include/absl/base/macros.h new file mode 100644 index 00000000..f11b0241 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/macros.h @@ -0,0 +1,149 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: macros.h +// ----------------------------------------------------------------------------- +// +// This header file defines the set of language macros used within Abseil code. +// For the set of macros used to determine supported compilers and platforms, +// see absl/base/config.h instead. +// +// This code is compiled directly on many platforms, including client +// platforms like Windows, Mac, and embedded systems. Before making +// any changes here, make sure that you're not breaking any platforms. + +#ifndef ABSL_BASE_MACROS_H_ +#define ABSL_BASE_MACROS_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +// ABSL_ARRAYSIZE() +// +// Returns the number of elements in an array as a compile-time constant, which +// can be used in defining new arrays. If you use this macro on a pointer by +// mistake, you will get a compile-time error. +#define ABSL_ARRAYSIZE(array) \ + (sizeof(::absl::macros_internal::ArraySizeHelper(array))) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace macros_internal + { + // Note: this internal template function declaration is used by ABSL_ARRAYSIZE. + // The function doesn't need a definition, as we only use its type. + template + auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; + } // namespace macros_internal + ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_BAD_CALL_IF() +// +// Used on a function overload to trap bad calls: any call that matches the +// overload will cause a compile-time error. This macro uses a clang-specific +// "enable_if" attribute, as described at +// https://clang.llvm.org/docs/AttributeReference.html#enable-if +// +// Overloads which use this macro should be bracketed by +// `#ifdef ABSL_BAD_CALL_IF`. 
+// +// Example: +// +// int isdigit(int c); +// #ifdef ABSL_BAD_CALL_IF +// int isdigit(int c) +// ABSL_BAD_CALL_IF(c <= -1 || c > 255, +// "'c' must have the value of an unsigned char or EOF"); +// #endif // ABSL_BAD_CALL_IF +#if ABSL_HAVE_ATTRIBUTE(enable_if) +#define ABSL_BAD_CALL_IF(expr, msg) \ + __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg))) +#endif + +// ABSL_ASSERT() +// +// In C++11, `assert` can't be used portably within constexpr functions. +// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr +// functions. Example: +// +// constexpr double Divide(double a, double b) { +// return ABSL_ASSERT(b != 0), a / b; +// } +// +// This macro is inspired by +// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/ +#if defined(NDEBUG) +#define ABSL_ASSERT(expr) \ + (false ? static_cast(expr) : static_cast(0)) +#else +#define ABSL_ASSERT(expr) \ + (ABSL_PREDICT_TRUE((expr)) ? static_cast(0) : [] { assert(false && #expr); }()) // NOLINT +#endif + +// `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()` +// aborts the program in release mode (when NDEBUG is defined). The +// implementation should abort the program as quickly as possible and ideally it +// should not be possible to ignore the abort request. +#define ABSL_INTERNAL_HARDENING_ABORT() \ + do \ + { \ + ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL(); \ + ABSL_INTERNAL_UNREACHABLE_IMPL(); \ + } while (false) + +// ABSL_HARDENING_ASSERT() +// +// `ABSL_HARDENING_ASSERT()` is like `ABSL_ASSERT()`, but used to implement +// runtime assertions that should be enabled in hardened builds even when +// `NDEBUG` is defined. +// +// When `NDEBUG` is not defined, `ABSL_HARDENING_ASSERT()` is identical to +// `ABSL_ASSERT()`. +// +// See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on +// hardened mode. +#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG) +#define ABSL_HARDENING_ASSERT(expr) \ + (ABSL_PREDICT_TRUE((expr)) ? 
static_cast(0) : [] { ABSL_INTERNAL_HARDENING_ABORT(); }()) +#else +#define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr) +#endif + +#ifdef ABSL_HAVE_EXCEPTIONS +#define ABSL_INTERNAL_TRY try +#define ABSL_INTERNAL_CATCH_ANY catch (...) +#define ABSL_INTERNAL_RETHROW \ + do \ + { \ + throw; \ + } while (false) +#else // ABSL_HAVE_EXCEPTIONS +#define ABSL_INTERNAL_TRY if (true) +#define ABSL_INTERNAL_CATCH_ANY else if (false) +#define ABSL_INTERNAL_RETHROW \ + do \ + { \ + } while (false) +#endif // ABSL_HAVE_EXCEPTIONS + +#endif // ABSL_BASE_MACROS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/nullability.h b/CAPI/cpp/grpc/include/absl/base/nullability.h new file mode 100644 index 00000000..af10a714 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/nullability.h @@ -0,0 +1,225 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: nullability.h +// ----------------------------------------------------------------------------- +// +// This header file defines a set of "templated annotations" for designating the +// expected nullability of pointers. These annotations allow you to designate +// pointers in one of three classification states: +// +// * "Non-null" (for pointers annotated `Nonnull`), indicating that it is +// invalid for the given pointer to ever be null. 
+// * "Nullable" (for pointers annotated `Nullable`), indicating that it is +// valid for the given pointer to be null. +// * "Unknown" (for pointers annotated `NullabilityUnknown`), indicating +// that the given pointer has not been yet classified as either nullable or +// non-null. This is the default state of unannotated pointers. +// +// NOTE: unannotated pointers implicitly bear the annotation +// `NullabilityUnknown`; you should rarely, if ever, see this annotation used +// in the codebase explicitly. +// +// ----------------------------------------------------------------------------- +// Nullability and Contracts +// ----------------------------------------------------------------------------- +// +// These nullability annotations allow you to more clearly specify contracts on +// software components by narrowing the *preconditions*, *postconditions*, and +// *invariants* of pointer state(s) in any given interface. It then depends on +// context who is responsible for fulfilling the annotation's requirements. +// +// For example, a function may receive a pointer argument. Designating that +// pointer argument as "non-null" tightens the precondition of the contract of +// that function. It is then the responsibility of anyone calling such a +// function to ensure that the passed pointer is not null. +// +// Similarly, a function may have a pointer as a return value. Designating that +// return value as "non-null" tightens the postcondition of the contract of that +// function. In this case, however, it is the responsibility of the function +// itself to ensure that the returned pointer is not null. +// +// Clearly defining these contracts allows providers (and consumers) of such +// pointers to have more confidence in their null state. If a function declares +// a return value as "non-null", for example, the caller should not need to +// check whether the returned value is `nullptr`; it can simply assume the +// pointer is valid. 
+// +// Of course most interfaces already have expectations on the nullability state +// of pointers, and these expectations are, in effect, a contract; often, +// however, those contracts are either poorly or partially specified, assumed, +// or misunderstood. These nullability annotations are designed to allow you to +// formalize those contracts within the codebase. +// +// ----------------------------------------------------------------------------- +// Using Nullability Annotations +// ----------------------------------------------------------------------------- +// +// It is important to note that these annotations are not distinct strong +// *types*. They are alias templates defined to be equal to the underlying +// pointer type. A pointer annotated `Nonnull`, for example, is simply a +// pointer of type `T*`. Each annotation acts as a form of documentation about +// the contract for the given pointer. Each annotation requires providers or +// consumers of these pointers across API boundaries to take appropriate steps +// when setting or using these pointers: +// +// * "Non-null" pointers should never be null. It is the responsibility of the +// provider of this pointer to ensure that the pointer may never be set to +// null. Consumers of such pointers can treat such pointers as non-null. +// * "Nullable" pointers may or may not be null. Consumers of such pointers +// should precede any usage of that pointer (e.g. a dereference operation) +// with a a `nullptr` check. +// * "Unknown" pointers may be either "non-null" or "nullable" but have not been +// definitively determined to be in either classification state. Providers of +// such pointers across API boundaries should determine -- over time -- to +// annotate the pointer in either of the above two states. Consumers of such +// pointers across an API boundary should continue to treat such pointers as +// they currently do. 
+// +// Example: +// +// // PaySalary() requires the passed pointer to an `Employee` to be non-null. +// void PaySalary(absl::Nonnull e) { +// pay(e->salary); // OK to dereference +// } +// +// // CompleteTransaction() guarantees the returned pointer to an `Account` to +// // be non-null. +// absl::Nonnull balance CompleteTransaction(double fee) { +// ... +// } +// +// // Note that specifying a nullability annotation does not prevent someone +// // from violating the contract: +// +// Nullable find(Map& employees, std::string_view name); +// +// void g(Map& employees) { +// Employee *e = find(employees, "Pat"); +// // `e` can now be null. +// PaySalary(e); // Violates contract, but compiles! +// } +// +// Nullability annotations, in other words, are useful for defining and +// narrowing contracts; *enforcement* of those contracts depends on use and any +// additional (static or dynamic analysis) tooling. +// +// NOTE: The "unknown" annotation state indicates that a pointer's contract has +// not yet been positively identified. The unknown state therefore acts as a +// form of documentation of your technical debt, and a codebase that adopts +// nullability annotations should aspire to annotate every pointer as either +// "non-null" or "nullable". +// +// ----------------------------------------------------------------------------- +// Applicability of Nullability Annotations +// ----------------------------------------------------------------------------- +// +// By default, nullability annotations are applicable to raw and smart +// pointers. User-defined types can indicate compatibility with nullability +// annotations by providing an `absl_nullability_compatible` nested type. The +// actual definition of this inner type is not relevant as it is used merely as +// a marker. It is common to use a using declaration of +// `absl_nullability_compatible` set to void. +// +// // Example: +// struct MyPtr { +// using absl_nullability_compatible = void; +// ... 
+// }; +// +// DISCLAIMER: +// =========================================================================== +// These nullability annotations are primarily a human readable signal about the +// intended contract of the pointer. They are not *types* and do not currently +// provide any correctness guarantees. For example, a pointer annotated as +// `Nonnull` is *not guaranteed* to be non-null, and the compiler won't +// alert or prevent assignment of a `Nullable` to a `Nonnull`. +// =========================================================================== +#ifndef ABSL_BASE_NULLABILITY_H_ +#define ABSL_BASE_NULLABILITY_H_ + +#include "absl/base/internal/nullability_impl.h" + +namespace absl +{ + + // absl::Nonnull + // + // The indicated pointer is never null. It is the responsibility of the provider + // of this pointer across an API boundary to ensure that the pointer is never be + // set to null. Consumers of this pointer across an API boundary may safely + // dereference the pointer. + // + // Example: + // + // // `employee` is designated as not null. + // void PaySalary(absl::Nonnull employee) { + // pay(*employee); // OK to dereference + // } + template + using Nonnull = nullability_internal::NonnullImpl; + + // absl::Nullable + // + // The indicated pointer may, by design, be either null or non-null. Consumers + // of this pointer across an API boundary should perform a `nullptr` check + // before performing any operation using the pointer. + // + // Example: + // + // // `employee` may be null. + // void PaySalary(absl::Nullable employee) { + // if (employee != nullptr) { + // Pay(*employee); // OK to dereference + // } + // } + template + using Nullable = nullability_internal::NullableImpl; + + // absl::NullabilityUnknown (default) + // + // The indicated pointer has not yet been determined to be definitively + // "non-null" or "nullable." 
Providers of such pointers across API boundaries + // should, over time, annotate such pointers as either "non-null" or "nullable." + // Consumers of these pointers across an API boundary should treat such pointers + // with the same caution they treat currently unannotated pointers. Most + // existing code will have "unknown" pointers, which should eventually be + // migrated into one of the above two nullability states: `Nonnull` or + // `Nullable`. + // + // NOTE: Because this annotation is the global default state, pointers without + // any annotation are assumed to have "unknown" semantics. This assumption is + // designed to minimize churn and reduce clutter within the codebase. + // + // Example: + // + // // `employee`s nullability state is unknown. + // void PaySalary(absl::NullabilityUnknown employee) { + // Pay(*employee); // Potentially dangerous. API provider should investigate. + // } + // + // Note that a pointer without an annotation, by default, is assumed to have the + // annotation `NullabilityUnknown`. + // + // // `employee`s nullability state is unknown. + // void PaySalary(Employee* employee) { + // Pay(*employee); // Potentially dangerous. API provider should investigate. + // } + template + using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl; + +} // namespace absl + +#endif // ABSL_BASE_NULLABILITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/optimization.h b/CAPI/cpp/grpc/include/absl/base/optimization.h new file mode 100644 index 00000000..24a44952 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/optimization.h @@ -0,0 +1,319 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: optimization.h +// ----------------------------------------------------------------------------- +// +// This header file defines portable macros for performance optimization. + +#ifndef ABSL_BASE_OPTIMIZATION_H_ +#define ABSL_BASE_OPTIMIZATION_H_ + +#include + +#include "absl/base/config.h" + +// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION +// +// Instructs the compiler to avoid optimizing tail-call recursion. This macro is +// useful when you wish to preserve the existing function order within a stack +// trace for logging, debugging, or profiling purposes. +// +// Example: +// +// int f() { +// int result = g(); +// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); +// return result; +// } +#if defined(__pnacl__) +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() \ + if (volatile int x = 0) \ + { \ + (void)x; \ + } +#elif defined(__clang__) +// Clang will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(__GNUC__) +// GCC will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(_MSC_VER) +#include +// The __nop() intrinsic blocks the optimisation. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() +#else +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() \ + if (volatile int x = 0) \ + { \ + (void)x; \ + } +#endif + +// ABSL_CACHELINE_SIZE +// +// Explicitly defines the size of the L1 cache for purposes of alignment. 
+// Setting the cacheline size allows you to specify that certain objects be +// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations. +// (See below.) +// +// NOTE: this macro should be replaced with the following C++17 features, when +// those are generally available: +// +// * `std::hardware_constructive_interference_size` +// * `std::hardware_destructive_interference_size` +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +#if defined(__GNUC__) +// Cache line alignment +#if defined(__i386__) || defined(__x86_64__) +#define ABSL_CACHELINE_SIZE 64 +#elif defined(__powerpc64__) +#define ABSL_CACHELINE_SIZE 128 +#elif defined(__aarch64__) +// We would need to read special register ctr_el0 to find out L1 dcache size. +// This value is a good estimate based on a real aarch64 machine. +#define ABSL_CACHELINE_SIZE 64 +#elif defined(__arm__) +// Cache line sizes for ARM: These values are not strictly correct since +// cache line sizes depend on implementations, not architectures. There +// are even implementations with cache line sizes configurable at boot +// time. +#if defined(__ARM_ARCH_5T__) +#define ABSL_CACHELINE_SIZE 32 +#elif defined(__ARM_ARCH_7A__) +#define ABSL_CACHELINE_SIZE 64 +#endif +#endif +#endif + +#ifndef ABSL_CACHELINE_SIZE +// A reasonable default guess. Note that overestimates tend to waste more +// space, while underestimates tend to waste more time. +#define ABSL_CACHELINE_SIZE 64 +#endif + +// ABSL_CACHELINE_ALIGNED +// +// Indicates that the declared object be cache aligned using +// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to +// load a set of related objects in the L1 cache for performance improvements. +// Cacheline aligning objects properly allows constructive memory sharing and +// prevents destructive (or "false") memory sharing. 
+// +// NOTE: callers should replace uses of this macro with `alignas()` using +// `std::hardware_constructive_interference_size` and/or +// `std::hardware_destructive_interference_size` when C++17 becomes available to +// them. +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +// +// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__` +// or `__declspec` attribute. For compilers where this is not known to work, +// the macro expands to nothing. +// +// No further guarantees are made here. The result of applying the macro +// to variables and types is always implementation-defined. +// +// WARNING: It is easy to use this attribute incorrectly, even to the point +// of causing bugs that are difficult to diagnose, crash, etc. It does not +// of itself guarantee that objects are aligned to a cache line. +// +// NOTE: Some compilers are picky about the locations of annotations such as +// this attribute, so prefer to put it at the beginning of your declaration. +// For example, +// +// ABSL_CACHELINE_ALIGNED static Foo* foo = ... +// +// class ABSL_CACHELINE_ALIGNED Bar { ... +// +// Recommendations: +// +// 1) Consult compiler documentation; this comment is not kept in sync as +// toolchains evolve. +// 2) Verify your use has the intended effect. This often requires inspecting +// the generated machine code. +// 3) Prefer applying this attribute to individual variables. Avoid +// applying it to types. This tends to localize the effect. +#if defined(__clang__) || defined(__GNUC__) +#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE))) +#elif defined(_MSC_VER) +#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE)) +#else +#define ABSL_CACHELINE_ALIGNED +#endif + +// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE +// +// Enables the compiler to prioritize compilation using static analysis for +// likely paths within a boolean branch. 
+// +// Example: +// +// if (ABSL_PREDICT_TRUE(expression)) { +// return result; // Faster if more likely +// } else { +// return 0; +// } +// +// Compilers can use the information that a certain branch is not likely to be +// taken (for instance, a CHECK failure) to optimize for the common case in +// the absence of better information (ie. compiling gcc with `-fprofile-arcs`). +// +// Recommendation: Modern CPUs dynamically predict branch execution paths, +// typically with accuracy greater than 97%. As a result, annotating every +// branch in a codebase is likely counterproductive; however, annotating +// specific branches that are both hot and consistently mispredicted is likely +// to yield performance improvements. +#if ABSL_HAVE_BUILTIN(__builtin_expect) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_PREDICT_FALSE(x) (__builtin_expect(false || (x), false)) +#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) +#else +#define ABSL_PREDICT_FALSE(x) (x) +#define ABSL_PREDICT_TRUE(x) (x) +#endif + +// `ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL()` aborts the program in the fastest +// possible way, with no attempt at logging. One use is to implement hardening +// aborts with ABSL_OPTION_HARDENED. Since this is an internal symbol, it +// should not be used directly outside of Abseil. +#if ABSL_HAVE_BUILTIN(__builtin_trap) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL() __builtin_trap() +#else +#define ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL() abort() +#endif + +// `ABSL_INTERNAL_UNREACHABLE_IMPL()` is the platform specific directive to +// indicate that a statement is unreachable, and to allow the compiler to +// optimize accordingly. Clients should use `ABSL_UNREACHABLE()`, which is +// defined below. 
+#if defined(__cpp_lib_unreachable) && __cpp_lib_unreachable >= 202202L +#define ABSL_INTERNAL_UNREACHABLE_IMPL() std::unreachable() +#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_INTERNAL_UNREACHABLE_IMPL() __builtin_unreachable() +#elif ABSL_HAVE_BUILTIN(__builtin_assume) +#define ABSL_INTERNAL_UNREACHABLE_IMPL() __builtin_assume(false) +#elif defined(_MSC_VER) +#define ABSL_INTERNAL_UNREACHABLE_IMPL() __assume(false) +#else +#define ABSL_INTERNAL_UNREACHABLE_IMPL() +#endif + +// `ABSL_UNREACHABLE()` is an unreachable statement. A program which reaches +// one has undefined behavior, and the compiler may optimize accordingly. +#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG) +// Abort in hardened mode to avoid dangerous undefined behavior. +#define ABSL_UNREACHABLE() \ + do \ + { \ + ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL(); \ + ABSL_INTERNAL_UNREACHABLE_IMPL(); \ + } while (false) +#else +// The assert only fires in debug mode to aid in debugging. +// When NDEBUG is defined, reaching ABSL_UNREACHABLE() is undefined behavior. +#define ABSL_UNREACHABLE() \ + do \ + { \ + /* NOLINTNEXTLINE: misc-static-assert */ \ + assert(false && "ABSL_UNREACHABLE reached"); \ + ABSL_INTERNAL_UNREACHABLE_IMPL(); \ + } while (false) +#endif + +// ABSL_ASSUME(cond) +// +// Informs the compiler that a condition is always true and that it can assume +// it to be true for optimization purposes. +// +// WARNING: If the condition is false, the program can produce undefined and +// potentially dangerous behavior. +// +// In !NDEBUG mode, the condition is checked with an assert(). +// +// NOTE: The expression must not have side effects, as it may only be evaluated +// in some compilation modes and not others. Some compilers may issue a warning +// if the compiler cannot prove the expression has no side effects. For example, +// the expression should not use a function call since the compiler cannot prove +// that a function call does not have side effects. 
+// +// Example: +// +// int x = ...; +// ABSL_ASSUME(x >= 0); +// // The compiler can optimize the division to a simple right shift using the +// // assumption specified above. +// int y = x / 16; +// +#if !defined(NDEBUG) +#define ABSL_ASSUME(cond) assert(cond) +#elif ABSL_HAVE_BUILTIN(__builtin_assume) +#define ABSL_ASSUME(cond) __builtin_assume(cond) +#elif defined(_MSC_VER) +#define ABSL_ASSUME(cond) __assume(cond) +#elif defined(__cpp_lib_unreachable) && __cpp_lib_unreachable >= 202202L +#define ABSL_ASSUME(cond) \ + do \ + { \ + if (!(cond)) \ + std::unreachable(); \ + } while (false) +#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_ASSUME(cond) \ + do \ + { \ + if (!(cond)) \ + __builtin_unreachable(); \ + } while (false) +#else +#define ABSL_ASSUME(cond) \ + do \ + { \ + static_cast(false && (cond)); \ + } while (false) +#endif + +// ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond) +// This macro forces small unique name on a static file level symbols like +// static local variables or static functions. This is intended to be used in +// macro definitions to optimize the cost of generated code. Do NOT use it on +// symbols exported from translation unit since it may cause a link time +// conflict. 
+// +// Example: +// +// #define MY_MACRO(txt) +// namespace { +// char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt; +// const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); +// const char* VeryVeryLongFuncName() { return txt; } +// } +// + +#if defined(__GNUC__) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) +#else +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() +#endif + +#endif // ABSL_BASE_OPTIMIZATION_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/options.h b/CAPI/cpp/grpc/include/absl/base/options.h new file mode 100644 index 00000000..3511c50f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/options.h @@ -0,0 +1,229 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: options.h +// ----------------------------------------------------------------------------- +// +// This file contains Abseil configuration options for setting specific +// implementations instead of letting Abseil determine which implementation to +// use at compile-time. 
Setting these options may be useful for package or build
+// managers who wish to guarantee ABI stability within binary builds (which are
+// otherwise difficult to enforce).
+//
+// *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that
+// maintainers of package managers who wish to package Abseil read and
+// understand this file! ***
+//
+// Abseil contains a number of possible configuration endpoints, based on
+// parameters such as the detected platform, language version, or command-line
+// flags used to invoke the underlying binary. As is the case with all
+// libraries, binaries which contain Abseil code must ensure that separate
+// packages use the same compiled copy of Abseil to avoid a diamond dependency
+// problem, which can occur if two packages built with different Abseil
+// configuration settings are linked together. Diamond dependency problems in
+// C++ may manifest as violations to the One Definition Rule (ODR) (resulting in
+// linker errors), or undefined behavior (resulting in crashes).
+//
+// Diamond dependency problems can be avoided if all packages utilize the same
+// exact version of Abseil. Building from source code with the same compilation
+// parameters is the easiest way to avoid such dependency problems. However, for
+// package managers who cannot control such compilation parameters, we are
+// providing the file to allow you to inject ABI (Application Binary Interface)
+// stability across builds. Setting options in this file will neither change
+// API nor ABI, providing a stable copy of Abseil between packages.
+//
+// Care must be taken to keep options within these configurations isolated
+// from any other dynamic settings, such as command-line flags which could alter
+// these options. This file is provided specifically to help build and package
+// managers provide a stable copy of Abseil within their libraries and binaries;
+// other developers should not have need to alter the contents of this file.
+// +// ----------------------------------------------------------------------------- +// Usage +// ----------------------------------------------------------------------------- +// +// For any particular package release, set the appropriate definitions within +// this file to whatever value makes the most sense for your package(s). Note +// that, by default, most of these options, at the moment, affect the +// implementation of types; future options may affect other implementation +// details. +// +// NOTE: the defaults within this file all assume that Abseil can select the +// proper Abseil implementation at compile-time, which will not be sufficient +// to guarantee ABI stability to package managers. + +#ifndef ABSL_BASE_OPTIONS_H_ +#define ABSL_BASE_OPTIONS_H_ + +// ----------------------------------------------------------------------------- +// Type Compatibility Options +// ----------------------------------------------------------------------------- +// +// ABSL_OPTION_USE_STD_ANY +// +// This option controls whether absl::any is implemented as an alias to +// std::any, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::any. This requires that all code +// using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::any is available. This option is +// useful when you are building your entire program, including all of its +// dependencies, from source. It should not be used otherwise -- for example, +// if you are distributing Abseil in a binary package manager -- since in +// mode 2, absl::any will name a different type, with a different mangled name +// and binary layout, depending on the compiler flags passed by the end user. 
+// For more info, see https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY. + +#define ABSL_OPTION_USE_STD_ANY 0 + +// ABSL_OPTION_USE_STD_OPTIONAL +// +// This option controls whether absl::optional is implemented as an alias to +// std::optional, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::optional. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::optional is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::optional will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. + +// User code should not inspect this macro. To check in the preprocessor if +// absl::optional is a typedef of std::optional, use the feature macro +// ABSL_USES_STD_OPTIONAL. + +#define ABSL_OPTION_USE_STD_OPTIONAL 0 + +// ABSL_OPTION_USE_STD_STRING_VIEW +// +// This option controls whether absl::string_view is implemented as an alias to +// std::string_view, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::string_view. 
This requires that +// all code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::string_view is available. This +// option is useful when you are building your program from source. It should +// not be used otherwise -- for example, if you are distributing Abseil in a +// binary package manager -- since in mode 2, absl::string_view will name a +// different type, with a different mangled name and binary layout, depending on +// the compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::string_view is a typedef of std::string_view, use the feature macro +// ABSL_USES_STD_STRING_VIEW. + +#define ABSL_OPTION_USE_STD_STRING_VIEW 0 + +// ABSL_OPTION_USE_STD_VARIANT +// +// This option controls whether absl::variant is implemented as an alias to +// std::variant, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::variant. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::variant is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::variant will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. 
To check in the preprocessor if
+// absl::variant is a typedef of std::variant, use the feature macro
+// ABSL_USES_STD_VARIANT.

+#define ABSL_OPTION_USE_STD_VARIANT 0

+// ABSL_OPTION_USE_INLINE_NAMESPACE
+// ABSL_OPTION_INLINE_NAMESPACE_NAME
+//
+// These options control whether all entities in the absl namespace are
+// contained within an inner inline namespace. This does not affect the
+// user-visible API of Abseil, but it changes the mangled names of all symbols.
+//
+// This can be useful as a version tag if you are distributing Abseil in
+// precompiled form. This will prevent a binary library build of Abseil with
+// one inline namespace being used with headers configured with a different
+// inline namespace name. Binary packagers are reminded that Abseil does not
+// guarantee any ABI stability in Abseil, so any update of Abseil or
+// configuration change in such a binary package should be combined with a
+// new, unique value for the inline namespace name.
+//
+// A value of 0 means not to use inline namespaces.
+//
+// A value of 1 means to use an inline namespace with the given name inside
+// namespace absl. If this is set, ABSL_OPTION_INLINE_NAMESPACE_NAME must also
+// be changed to a new, unique identifier name. In particular "head" is not
+// allowed.

+#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20230802

+// ABSL_OPTION_HARDENED
+//
+// This option enables a "hardened" build in release mode (in this context,
+// release mode is defined as a build where the `NDEBUG` macro is defined).
+//
+// A value of 0 means that "hardened" mode is not enabled.
+//
+// A value of 1 means that "hardened" mode is enabled.
+//
+// Hardened builds have additional security checks enabled when `NDEBUG` is
+// defined. Defining `NDEBUG` is normally used to turn `assert()` macro into a
+// no-op, as well as disabling other bespoke program consistency checks.
By +// defining ABSL_OPTION_HARDENED to 1, a select set of checks remain enabled in +// release mode. These checks guard against programming errors that may lead to +// security vulnerabilities. In release mode, when one of these programming +// errors is encountered, the program will immediately abort, possibly without +// any attempt at logging. +// +// The checks enabled by this option are not free; they do incur runtime cost. +// +// The checks enabled by this option are always active when `NDEBUG` is not +// defined, even in the case when ABSL_OPTION_HARDENED is defined to 0. The +// checks enabled by this option may abort the program in a different way and +// log additional information when `NDEBUG` is not defined. + +#define ABSL_OPTION_HARDENED 0 + +#endif // ABSL_BASE_OPTIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/policy_checks.h b/CAPI/cpp/grpc/include/absl/base/policy_checks.h new file mode 100644 index 00000000..372e848d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/policy_checks.h @@ -0,0 +1,113 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: policy_checks.h +// ----------------------------------------------------------------------------- +// +// This header enforces a minimum set of policies at build time, such as the +// supported compiler and library versions. 
Unsupported configurations are +// reported with `#error`. This enforcement is best effort, so successfully +// compiling this header does not guarantee a supported configuration. + +#ifndef ABSL_BASE_POLICY_CHECKS_H_ +#define ABSL_BASE_POLICY_CHECKS_H_ + +// Included for the __GLIBC_PREREQ macro used below. +#include + +// Included for the _STLPORT_VERSION macro used below. +#if defined(__cplusplus) +#include +#endif + +// ----------------------------------------------------------------------------- +// Operating System Check +// ----------------------------------------------------------------------------- + +#if defined(__CYGWIN__) +#error "Cygwin is not supported." +#endif + +// ----------------------------------------------------------------------------- +// Toolchain Check +// ----------------------------------------------------------------------------- + +// We support Visual Studio 2019 (MSVC++ 16.0) and later. +// This minimum will go up. +#if defined(_MSC_VER) && _MSC_VER < 1920 && !defined(__clang__) +#error "This package requires Visual Studio 2019 (MSVC++ 16.0) or higher." +#endif + +// We support GCC 7 and later. +// This minimum will go up. +#if defined(__GNUC__) && !defined(__clang__) +#if __GNUC__ < 7 +#error "This package requires GCC 7 or higher." +#endif +#endif + +// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later. +// This corresponds to Apple Xcode version 4.5. +// This minimum will go up. +#if defined(__apple_build_version__) && __apple_build_version__ < 4211165 +#error "This package requires __apple_build_version__ of 4211165 or higher." +#endif + +// ----------------------------------------------------------------------------- +// C++ Version Check +// ----------------------------------------------------------------------------- + +// Enforce C++14 as the minimum. +#if defined(_MSVC_LANG) +#if _MSVC_LANG < 201402L +#error "C++ versions less than C++14 are not supported." 
+#endif // _MSVC_LANG < 201402L +#elif defined(__cplusplus) +#if __cplusplus < 201402L +#error "C++ versions less than C++14 are not supported." +#endif // __cplusplus < 201402L +#endif + +// ----------------------------------------------------------------------------- +// Standard Library Check +// ----------------------------------------------------------------------------- + +#if defined(_STLPORT_VERSION) +#error "STLPort is not supported." +#endif + +// ----------------------------------------------------------------------------- +// `char` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a +// platform where this is not the case, please provide us with the details about +// your platform so we can consider relaxing this requirement. +#if CHAR_BIT != 8 +#error "Abseil assumes CHAR_BIT == 8." +#endif + +// ----------------------------------------------------------------------------- +// `int` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes that an int is 4 bytes. If you would like to use +// Abseil on a platform where this is not the case, please provide us with the +// details about your platform so we can consider relaxing this requirement. +#if INT_MAX < 2147483647 +#error "Abseil assumes that int is at least 4 bytes. " +#endif + +#endif // ABSL_BASE_POLICY_CHECKS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/port.h b/CAPI/cpp/grpc/include/absl/base/port.h new file mode 100644 index 00000000..5bc4d6cd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/port.h @@ -0,0 +1,25 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This files is a forwarding header for other headers containing various +// portability macros and functions. + +#ifndef ABSL_BASE_PORT_H_ +#define ABSL_BASE_PORT_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +#endif // ABSL_BASE_PORT_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/prefetch.h b/CAPI/cpp/grpc/include/absl/base/prefetch.h new file mode 100644 index 00000000..d3a56d2d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/prefetch.h @@ -0,0 +1,215 @@ +// Copyright 2023 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: prefetch.h +// ----------------------------------------------------------------------------- +// +// This header file defines prefetch functions to prefetch memory contents +// into the first level cache (L1) for the current CPU. 
The prefetch logic +// offered in this header is limited to prefetching first level cachelines +// only, and is aimed at relatively 'simple' prefetching logic. +// +#ifndef ABSL_BASE_PREFETCH_H_ +#define ABSL_BASE_PREFETCH_H_ + +#include "absl/base/config.h" + +#if defined(ABSL_INTERNAL_HAVE_SSE) +#include +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1900 && \ + (defined(_M_X64) || defined(_M_IX86)) +#include +#pragma intrinsic(_mm_prefetch) +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Moves data into the L1 cache before it is read, or "prefetches" it. + // + // The value of `addr` is the address of the memory to prefetch. If + // the target and compiler support it, data prefetch instructions are + // generated. If the prefetch is done some time before the memory is + // read, it may be in the cache by the time the read occurs. + // + // This method prefetches data with the highest degree of temporal locality; + // data is prefetched where possible into all levels of the cache. + // + // Incorrect or gratuitous use of this function can degrade performance. + // Use this function only when representative benchmarks show an improvement. + // + // Example: + // + // // Computes incremental checksum for `data`. 
+ // int ComputeChecksum(int sum, absl::string_view data); + // + // // Computes cumulative checksum for all values in `data` + // int ComputeChecksum(absl::Span data) { + // int sum = 0; + // auto it = data.begin(); + // auto pit = data.begin(); + // auto end = data.end(); + // for (int dist = 8; dist > 0 && pit != data.end(); --dist, ++pit) { + // absl::PrefetchToLocalCache(pit->data()); + // } + // for (; pit != end; ++pit, ++it) { + // sum = ComputeChecksum(sum, *it); + // absl::PrefetchToLocalCache(pit->data()); + // } + // for (; it != end; ++it) { + // sum = ComputeChecksum(sum, *it); + // } + // return sum; + // } + // + void PrefetchToLocalCache(const void* addr); + + // Moves data into the L1 cache before it is read, or "prefetches" it. + // + // This function is identical to `PrefetchToLocalCache()` except that it has + // non-temporal locality: the fetched data should not be left in any of the + // cache tiers. This is useful for cases where the data is used only once / + // short term, for example, invoking a destructor on an object. + // + // Incorrect or gratuitous use of this function can degrade performance. + // Use this function only when representative benchmarks show an improvement. + // + // Example: + // + // template + // void DestroyPointers(Iterator begin, Iterator end) { + // size_t distance = std::min(8U, bars.size()); + // + // int dist = 8; + // auto prefetch_it = begin; + // while (prefetch_it != end && --dist;) { + // absl::PrefetchToLocalCacheNta(*prefetch_it++); + // } + // while (prefetch_it != end) { + // delete *begin++; + // absl::PrefetchToLocalCacheNta(*prefetch_it++); + // } + // while (begin != end) { + // delete *begin++; + // } + // } + // + void PrefetchToLocalCacheNta(const void* addr); + + // Moves data into the L1 cache with the intent to modify it. 
+ // + // This function is similar to `PrefetchToLocalCache()` except that it + // prefetches cachelines with an 'intent to modify' This typically includes + // invalidating cache entries for this address in all other cache tiers, and an + // exclusive access intent. + // + // Incorrect or gratuitous use of this function can degrade performance. As this + // function can invalidate cached cachelines on other caches and computer cores, + // incorrect usage of this function can have an even greater negative impact + // than incorrect regular prefetches. + // Use this function only when representative benchmarks show an improvement. + // + // Example: + // + // void* Arena::Allocate(size_t size) { + // void* ptr = AllocateBlock(size); + // absl::PrefetchToLocalCacheForWrite(p); + // return ptr; + // } + // + void PrefetchToLocalCacheForWrite(const void* addr); + +#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + +#define ABSL_HAVE_PREFETCH 1 + + // See __builtin_prefetch: + // https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. + // + inline void PrefetchToLocalCache(const void* addr) + { + __builtin_prefetch(addr, 0, 3); + } + + inline void PrefetchToLocalCacheNta(const void* addr) + { + __builtin_prefetch(addr, 0, 0); + } + + inline void PrefetchToLocalCacheForWrite(const void* addr) + { + // [x86] gcc/clang don't generate PREFETCHW for __builtin_prefetch(.., 1) + // unless -march=broadwell or newer; this is not generally the default, so we + // manually emit prefetchw. PREFETCHW is recognized as a no-op on older Intel + // processors and has been present on AMD processors since the K6-2. 
+#if defined(__x86_64__) + asm("prefetchw (%0)" + : + : "r"(addr)); +#else + __builtin_prefetch(addr, 1, 3); +#endif + } + +#elif defined(ABSL_INTERNAL_HAVE_SSE) + +#define ABSL_HAVE_PREFETCH 1 + + inline void PrefetchToLocalCache(const void* addr) + { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T0); + } + + inline void PrefetchToLocalCacheNta(const void* addr) + { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_NTA); + } + + inline void PrefetchToLocalCacheForWrite(const void* addr) + { +#if defined(_MM_HINT_ET0) + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_ET0); +#elif !defined(_MSC_VER) && defined(__x86_64__) + // _MM_HINT_ET0 is not universally supported. As we commented further + // up, PREFETCHW is recognized as a no-op on older Intel processors + // and has been present on AMD processors since the K6-2. We have this + // disabled for MSVC compilers as this miscompiles on older MSVC compilers. + asm("prefetchw (%0)" + : + : "r"(addr)); +#endif + } + +#else + + inline void PrefetchToLocalCache(const void* addr) + { + } + inline void PrefetchToLocalCacheNta(const void* addr) + { + } + inline void PrefetchToLocalCacheForWrite(const void* addr) + { + } + +#endif + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_PREFETCH_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/thread_annotations.h b/CAPI/cpp/grpc/include/absl/base/thread_annotations.h new file mode 100644 index 00000000..fc2fc2c1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/thread_annotations.h @@ -0,0 +1,339 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: thread_annotations.h +// ----------------------------------------------------------------------------- +// +// This header file contains macro definitions for thread safety annotations +// that allow developers to document the locking policies of multi-threaded +// code. The annotations can also help program analysis tools to identify +// potential thread safety issues. +// +// These annotations are implemented using compiler attributes. Using the macros +// defined here instead of raw attributes allow for portability and future +// compatibility. +// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. + +#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_THREAD_ANNOTATIONS_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +// TODO(mbonadei): Remove after the backward compatibility period. +#include "absl/base/internal/thread_annotations.h" // IWYU pragma: export + +// ABSL_GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. 
+// +// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ ABSL_GUARDED_BY(mu_); +// ... +// }; +#if ABSL_HAVE_ATTRIBUTE(guarded_by) +#define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x))) +#else +#define ABSL_GUARDED_BY(x) +#endif + +// ABSL_PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ ABSL_PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); +#if ABSL_HAVE_ATTRIBUTE(pt_guarded_by) +#define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x))) +#else +#define ABSL_PT_GUARDED_BY(x) +#endif + +// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER +// and ABSL_ACQUIRED_BEFORE.) +// +// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); +#if ABSL_HAVE_ATTRIBUTE(acquired_after) +#define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_AFTER(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(acquired_before) +#define ABSL_ACQUIRED_BEFORE(...) 
__attribute__((acquired_before(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_BEFORE(...) +#endif + +// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// ABSL_SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a ABSL_GUARDED_BY(mu1); +// int b ABSL_GUARDED_BY(mu2); +// +// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ + __attribute__((exclusive_locks_required(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(shared_locks_required) +#define ABSL_SHARED_LOCKS_REQUIRED(...) \ + __attribute__((shared_locks_required(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCKS_REQUIRED(...) +#endif + +// ABSL_LOCKS_EXCLUDED() +// +// Documents the locks that cannot be held by callers of this function, as they +// might be acquired by this function (Abseil's `Mutex` locks are +// non-reentrant). +#if ABSL_HAVE_ATTRIBUTE(locks_excluded) +#define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__))) +#else +#define ABSL_LOCKS_EXCLUDED(...) +#endif + +// ABSL_LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. 
For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with ABSL_LOCK_RETURNED. +#if ABSL_HAVE_ATTRIBUTE(lock_returned) +#define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x))) +#else +#define ABSL_LOCK_RETURNED(x) +#endif + +// ABSL_LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). +#if ABSL_HAVE_ATTRIBUTE(lockable) +#define ABSL_LOCKABLE __attribute__((lockable)) +#else +#define ABSL_LOCKABLE +#endif + +// ABSL_SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#if ABSL_HAVE_ATTRIBUTE(scoped_lockable) +#define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable)) +#else +#define ABSL_SCOPED_LOCKABLE +#endif + +// ABSL_EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function) +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ + __attribute__((exclusive_lock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) +#endif + +// ABSL_SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#if ABSL_HAVE_ATTRIBUTE(shared_lock_function) +#define ABSL_SHARED_LOCK_FUNCTION(...) \ + __attribute__((shared_lock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCK_FUNCTION(...) +#endif + +// ABSL_UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#if ABSL_HAVE_ATTRIBUTE(unlock_function) +#define ABSL_UNLOCK_FUNCTION(...) 
__attribute__((unlock_function(__VA_ARGS__))) +#else +#define ABSL_UNLOCK_FUNCTION(...) +#endif + +// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function) +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + __attribute__((exclusive_trylock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(shared_trylock_function) +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ + __attribute__((shared_trylock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) +#endif + +// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock) +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ + __attribute__((assert_exclusive_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock) +#define ABSL_ASSERT_SHARED_LOCK(...) \ + __attribute__((assert_shared_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_SHARED_LOCK(...) +#endif + +// ABSL_NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. +// This annotation is used to mark functions that are known to be correct, but +// the locking behavior is more complicated than the analyzer can handle. 
+#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis) +#define ABSL_NO_THREAD_SAFETY_ANALYSIS \ + __attribute__((no_thread_safety_analysis)) +#else +#define ABSL_NO_THREAD_SAFETY_ANALYSIS +#endif + +//------------------------------------------------------------------------------ +// Tool-Supplied Annotations +//------------------------------------------------------------------------------ + +// ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. +#define ABSL_TS_UNCHECKED(x) "" + +// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax. +// It is used by automated tools to mark and disable invalid expressions. +// The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED. +#define ABSL_TS_FIXME(x) "" + +// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body +// of a particular function. However, this attribute is used to mark functions +// that are incorrect and need to be fixed. It is used by automated tools to +// avoid breaking the build when the analysis is updated. +// Code owners are expected to eventually fix the routine. +#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS + +// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a +// ABSL_GUARDED_BY annotation that needs to be fixed, because it is producing +// thread safety warning. It disables the ABSL_GUARDED_BY. +#define ABSL_GUARDED_BY_FIXME(x) + +// Disables warnings for a single read operation. This can be used to avoid +// warnings when it is known that the read is not actually involved in a race, +// but the compiler cannot confirm that. 
+#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Takes a reference to a guarded data member, and returns an unguarded + // reference. + // Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. + template + inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS + { + return v; + } + + template + inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS + { + return v; + } + + } // namespace base_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_THREAD_ANNOTATIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h b/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h new file mode 100644 index 00000000..e4a38c32 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h @@ -0,0 +1,146 @@ +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: cleanup.h +// ----------------------------------------------------------------------------- +// +// `absl::Cleanup` implements the scope guard idiom, invoking the contained +// callback's `operator()() &&` on scope exit. 
+// +// Example: +// +// ``` +// absl::Status CopyGoodData(const char* source_path, const char* sink_path) { +// FILE* source_file = fopen(source_path, "r"); +// if (source_file == nullptr) { +// return absl::NotFoundError("No source file"); // No cleanups execute +// } +// +// // C++17 style cleanup using class template argument deduction +// absl::Cleanup source_closer = [source_file] { fclose(source_file); }; +// +// FILE* sink_file = fopen(sink_path, "w"); +// if (sink_file == nullptr) { +// return absl::NotFoundError("No sink file"); // First cleanup executes +// } +// +// // C++11 style cleanup using the factory function +// auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); }); +// +// Data data; +// while (ReadData(source_file, &data)) { +// if (!data.IsGood()) { +// absl::Status result = absl::FailedPreconditionError("Read bad data"); +// return result; // Both cleanups execute +// } +// SaveData(sink_file, &data); +// } +// +// return absl::OkStatus(); // Both cleanups execute +// } +// ``` +// +// Methods: +// +// `std::move(cleanup).Cancel()` will prevent the callback from executing. +// +// `std::move(cleanup).Invoke()` will execute the callback early, before +// destruction, and prevent the callback from executing in the destructor. +// +// Usage: +// +// `absl::Cleanup` is not an interface type. It is only intended to be used +// within the body of a function. It is not a value type and instead models a +// control flow construct. Check out `defer` in Golang for something similar. 
+ +#ifndef ABSL_CLEANUP_CLEANUP_H_ +#define ABSL_CLEANUP_CLEANUP_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/cleanup/internal/cleanup.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + template + class ABSL_MUST_USE_RESULT Cleanup final + { + static_assert(cleanup_internal::WasDeduced(), "Explicit template parameters are not supported."); + + static_assert(cleanup_internal::ReturnsVoid(), "Callbacks that return values are not supported."); + + public: + Cleanup(Callback callback) : + storage_(std::move(callback)) + { + } // NOLINT + + Cleanup(Cleanup&& other) = default; + + void Cancel() && + { + ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + storage_.DestroyCallback(); + } + + void Invoke() && + { + ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + storage_.InvokeCallback(); + storage_.DestroyCallback(); + } + + ~Cleanup() + { + if (storage_.IsCallbackEngaged()) + { + storage_.InvokeCallback(); + storage_.DestroyCallback(); + } + } + + private: + cleanup_internal::Storage storage_; + }; + +// `absl::Cleanup c = /* callback */;` +// +// C++17 type deduction API for creating an instance of `absl::Cleanup` +#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) + template + Cleanup(Callback callback) -> Cleanup; +#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) + + // `auto c = absl::MakeCleanup(/* callback */);` + // + // C++11 type deduction API for creating an instance of `absl::Cleanup` + template + absl::Cleanup MakeCleanup(Callback callback) + { + static_assert(cleanup_internal::WasDeduced(), "Explicit template parameters are not supported."); + + static_assert(cleanup_internal::ReturnsVoid(), "Callbacks that return values are not supported."); + + return {std::move(callback)}; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CLEANUP_CLEANUP_H_ diff --git a/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h b/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h 
new file mode 100644 index 00000000..d4930354 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h @@ -0,0 +1,118 @@ +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_ +#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_ + +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/base/macros.h" +#include "absl/base/thread_annotations.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace cleanup_internal + { + + struct Tag + { + }; + + template + constexpr bool WasDeduced() + { + return (std::is_same::value) && + (sizeof...(Args) == 0); + } + + template + constexpr bool ReturnsVoid() + { + return (std::is_same, void>::value); + } + + template + class Storage + { + public: + Storage() = delete; + + explicit Storage(Callback callback) + { + // Placement-new into a character buffer is used for eager destruction when + // the cleanup is invoked or cancelled. To ensure this optimizes well, the + // behavior is implemented locally instead of using an absl::optional. 
+ ::new (GetCallbackBuffer()) Callback(std::move(callback)); + is_callback_engaged_ = true; + } + + Storage(Storage&& other) + { + ABSL_HARDENING_ASSERT(other.IsCallbackEngaged()); + + ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback())); + is_callback_engaged_ = true; + + other.DestroyCallback(); + } + + Storage(const Storage& other) = delete; + + Storage& operator=(Storage&& other) = delete; + + Storage& operator=(const Storage& other) = delete; + + void* GetCallbackBuffer() + { + return static_cast(+callback_buffer_); + } + + Callback& GetCallback() + { + return *reinterpret_cast(GetCallbackBuffer()); + } + + bool IsCallbackEngaged() const + { + return is_callback_engaged_; + } + + void DestroyCallback() + { + is_callback_engaged_ = false; + GetCallback().~Callback(); + } + + void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS + { + std::move(GetCallback())(); + } + + private: + bool is_callback_engaged_; + alignas(Callback) char callback_buffer_[sizeof(Callback)]; + }; + + } // namespace cleanup_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/btree_map.h b/CAPI/cpp/grpc/include/absl/container/btree_map.h new file mode 100644 index 00000000..212c5d2d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/btree_map.h @@ -0,0 +1,903 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: btree_map.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree maps: sorted associative containers mapping +// keys to values. +// +// * `absl::btree_map<>` +// * `absl::btree_multimap<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::map` and `std::multimap`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. +// +// Unlike `std::map` and `std::multimap`, which are commonly implemented using +// red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree maps perform better than their `std::map` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::map` and `std::multimap` as there are some API differences, which are +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid +// iterator at the current position. Another important difference is that +// key-types must be copy-constructible. 
+// +// Another API difference is that btree iterators can be subtracted, and this +// is faster than using std::distance. + +#ifndef ABSL_CONTAINER_BTREE_MAP_H_ +#define ABSL_CONTAINER_BTREE_MAP_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace container_internal + { + + template + struct map_params; + + } // namespace container_internal + + // absl::btree_map<> + // + // An `absl::btree_map` is an ordered associative container of + // unique keys and associated values designed to be a more efficient replacement + // for `std::map` (in most cases). + // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_map` uses a default allocator of + // `std::allocator>` to allocate (and deallocate) + // nodes, and construct and destruct values within those nodes. You may + // instead specify a custom allocator `A` (which in turn requires specifying a + // custom comparator `C`) as in `absl::btree_map`. 
+ // + template, typename Alloc = std::allocator>> + class btree_map : public container_internal::btree_map_container>> + { + using Base = typename btree_map::btree_map_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_map` supports the same overload set as `std::map` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_map map1; + // + // * Initializer List constructor + // + // absl::btree_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_map map3(map2); + // + // * Copy assignment operator + // + // absl::btree_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_map map7(v.begin(), v.end()); + btree_map() + { + } + using Base::Base; + + // btree_map::begin() + // + // Returns an iterator to the beginning of the `btree_map`. + using Base::begin; + + // btree_map::cbegin() + // + // Returns a const iterator to the beginning of the `btree_map`. + using Base::cbegin; + + // btree_map::end() + // + // Returns an iterator to the end of the `btree_map`. + using Base::end; + + // btree_map::cend() + // + // Returns a const iterator to the end of the `btree_map`. + using Base::cend; + + // btree_map::empty() + // + // Returns whether or not the `btree_map` is empty. + using Base::empty; + + // btree_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_map`. 
+ using Base::max_size; + + // btree_map::size() + // + // Returns the number of elements currently within the `btree_map`. + using Base::size; + + // btree_map::clear() + // + // Removes all elements from the `btree_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_map::erase() + // + // Erases elements within the `btree_map`. If an erase occurs, any references, + // pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_map`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // btree_map::insert() + // + // Inserts an element of the specified value into the `btree_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // std::pair insert(const value_type& value): + // + // Inserts a value into the `btree_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(value_type&& value): + // + // Inserts a moveable value into the `btree_map`. 
Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_map::insert_or_assign() + // + // Inserts an element of the specified value into the `btree_map` provided + // that a value with the given key does not already exist, or replaces the + // corresponding mapped type with the forwarded `obj` argument if a key for + // that value already exists, returning an iterator pointing to the newly + // inserted element. Overloads are listed below. + // + // pair insert_or_assign(const key_type& k, M&& obj): + // pair insert_or_assign(key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map`. If the returned bool is true, insertion took place, and if + // it's false, assignment took place. + // + // iterator insert_or_assign(const_iterator hint, + // const key_type& k, M&& obj): + // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. 
+ using Base::insert_or_assign; + + // btree_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + // + // Overloads are listed below. + // + // std::pair try_emplace(const key_type& k, Args&&... args): + // std::pair try_emplace(key_type&& k, Args&&... 
args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::try_emplace; + + // btree_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Any references, pointers, or iterators + // are invalidated. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_map` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. 
+ using Base::extract; + + // btree_map::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. + // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + + // btree_map::merge() + // + // Extracts elements from a given `source` btree_map into this + // `btree_map`. If the destination `btree_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_map::swap(btree_map& other) + // + // Exchanges the contents of this `btree_map` with those of the `other` + // btree_map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // btree_map::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_map`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_map::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_map`. 
Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_map::equal_range() + // + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the `btree_map`. + using Base::equal_range; + + // btree_map::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_map::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_map::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `btree_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. Otherwise iterators are not affected and references are not + // invalidated. Overloads are listed below. 
+ // + // T& operator[](key_type&& key): + // T& operator[](const key_type& key): + // + // Inserts a value_type object constructed in-place if the element with the + // given key does not exist. + using Base::operator[]; + + // btree_map::get_allocator() + // + // Returns the allocator function associated with this `btree_map`. + using Base::get_allocator; + + // btree_map::key_comp(); + // + // Returns the key comparator associated with this `btree_map`. + using Base::key_comp; + + // btree_map::value_comp(); + // + // Returns the value comparator associated with this `btree_map`. + using Base::value_comp; + }; + + // absl::swap(absl::btree_map<>, absl::btree_map<>) + // + // Swaps the contents of two `absl::btree_map` containers. + template + void swap(btree_map& x, btree_map& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_map<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. + template + typename btree_map::size_type erase_if( + btree_map& map, Pred pred + ) + { + return container_internal::btree_access::erase_if(map, std::move(pred)); + } + + // absl::btree_multimap + // + // An `absl::btree_multimap` is an ordered associative container of + // keys and associated values designed to be a more efficient replacement for + // `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap + // allows multiple elements with equivalent keys. + // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_multimap` uses a default allocator of + // `std::allocator>` to allocate (and deallocate) + // nodes, and construct and destruct values within those nodes. You may + // instead specify a custom allocator `A` (which in turn requires specifying a + // custom comparator `C`) as in `absl::btree_multimap`. 
+ // + template, typename Alloc = std::allocator>> + class btree_multimap : public container_internal::btree_multimap_container>> + { + using Base = typename btree_multimap::btree_multimap_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multimap` supports the same overload set as `std::multimap` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multimap map1; + // + // * Initializer List constructor + // + // absl::btree_multimap map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_multimap map3(map2); + // + // * Copy assignment operator + // + // absl::btree_multimap map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multimap map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multimap map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_multimap map7(v.begin(), v.end()); + btree_multimap() + { + } + using Base::Base; + + // btree_multimap::begin() + // + // Returns an iterator to the beginning of the `btree_multimap`. + using Base::begin; + + // btree_multimap::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multimap`. + using Base::cbegin; + + // btree_multimap::end() + // + // Returns an iterator to the end of the `btree_multimap`. + using Base::end; + + // btree_multimap::cend() + // + // Returns a const iterator to the end of the `btree_multimap`. + using Base::cend; + + // btree_multimap::empty() + // + // Returns whether or not the `btree_multimap` is empty. + using Base::empty; + + // btree_multimap::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multimap` under current memory constraints. 
This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multimap`. + using Base::max_size; + + // btree_multimap::size() + // + // Returns the number of elements currently within the `btree_multimap`. + using Base::size; + + // btree_multimap::clear() + // + // Removes all elements from the `btree_multimap`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multimap::erase() + // + // Erases elements within the `btree_multimap`. If an erase occurs, any + // references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multimap`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multimap::insert() + // + // Inserts an element of the specified value into the `btree_multimap`, + // returning an iterator pointing to the newly inserted element. + // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multimap`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multimap`, returning an iterator + // to the inserted element. 
+ // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multimap::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multimap::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multimap::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multimap` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. 
We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_multimap::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. + // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + + // btree_multimap::merge() + // + // Extracts all elements from a given `source` btree_multimap into this + // `btree_multimap`. + using Base::merge; + + // btree_multimap::swap(btree_multimap& other) + // + // Exchanges the contents of this `btree_multimap` with those of the `other` + // btree_multimap, avoiding invocation of any move, copy, or swap operations + // on individual elements. + // + // All iterators and references on the `btree_multimap` remain valid, + // excepting for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_multimap::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_multimap`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. 
+ using Base::contains; + + // btree_multimap::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_multimap::equal_range() + // + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_multimap`. + using Base::equal_range; + + // btree_multimap::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_multimap::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multimap::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_multimap::get_allocator() + // + // Returns the allocator function associated with this `btree_multimap`. + using Base::get_allocator; + + // btree_multimap::key_comp(); + // + // Returns the key comparator associated with this `btree_multimap`. 
+ using Base::key_comp; + + // btree_multimap::value_comp(); + // + // Returns the value comparator associated with this `btree_multimap`. + using Base::value_comp; + }; + + // absl::swap(absl::btree_multimap<>, absl::btree_multimap<>) + // + // Swaps the contents of two `absl::btree_multimap` containers. + template + void swap(btree_multimap& x, btree_multimap& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_multimap<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. + template + typename btree_multimap::size_type erase_if( + btree_multimap& map, Pred pred + ) + { + return container_internal::btree_access::erase_if(map, std::move(pred)); + } + + namespace container_internal + { + + // A parameters structure for holding the type parameters for a btree_map. + // Compare and Alloc should be nothrow copy-constructible. + template + struct map_params : common_params> + { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. + using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + template + static auto key(const V& value) -> decltype(value.first) + { + return value.first; + } + static const Key& key(const slot_type* s) + { + return slot_policy::key(s); + } + static const Key& key(slot_type* s) + { + return slot_policy::key(s); + } + // For use in node handle. 
+ static auto mutable_key(slot_type* s) + -> decltype(slot_policy::mutable_key(s)) + { + return slot_policy::mutable_key(s); + } + static mapped_type& value(value_type* value) + { + return value->second; + } + }; + + } // namespace container_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/btree_set.h b/CAPI/cpp/grpc/include/absl/container/btree_set.h new file mode 100644 index 00000000..62256549 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/btree_set.h @@ -0,0 +1,843 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: btree_set.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree sets: sorted associative containers of +// values. +// +// * `absl::btree_set<>` +// * `absl::btree_multiset<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::set` and `std::multiset`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. 
+// +// Unlike `std::set` and `std::multiset`, which are commonly implemented using +// red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree sets perform better than their `std::set` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::set` and `std::multiset` as there are some API differences, which are +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid +// iterator at the current position. +// +// Another API difference is that btree iterators can be subtracted, and this +// is faster than using std::distance. + +#ifndef ABSL_CONTAINER_BTREE_SET_H_ +#define ABSL_CONTAINER_BTREE_SET_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace container_internal + { + + template + struct set_slot_policy; + + template + struct set_params; + + } // namespace container_internal + + // absl::btree_set<> + // + // An `absl::btree_set` is an ordered associative container of unique key + // values designed to be a more efficient replacement for `std::set` (in most + // cases). 
+ // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_set` uses a default allocator of `std::allocator` to + // allocate (and deallocate) nodes, and construct and destruct values within + // those nodes. You may instead specify a custom allocator `A` (which in turn + // requires specifying a custom comparator `C`) as in + // `absl::btree_set`. + // + template, typename Alloc = std::allocator> + class btree_set : public container_internal::btree_set_container>> + { + using Base = typename btree_set::btree_set_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_set` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_set set1; + // + // * Initializer List constructor + // + // absl::btree_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_set set3(set2); + // + // * Copy assignment operator + // + // absl::btree_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_set set7(v.begin(), v.end()); + btree_set() + { + } + using Base::Base; + + // btree_set::begin() + // + // Returns an iterator to the beginning of the `btree_set`. + using Base::begin; + + // btree_set::cbegin() + // + // Returns a const iterator to the beginning of the `btree_set`. + using Base::cbegin; + + // btree_set::end() + // + // Returns an iterator to the end of the `btree_set`. + using Base::end; + + // btree_set::cend() + // + // Returns a const iterator to the end of the `btree_set`. 
+ using Base::cend; + + // btree_set::empty() + // + // Returns whether or not the `btree_set` is empty. + using Base::empty; + + // btree_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_set` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_set`. + using Base::max_size; + + // btree_set::size() + // + // Returns the number of elements currently within the `btree_set`. + using Base::size; + + // btree_set::clear() + // + // Removes all elements from the `btree_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_set::erase() + // + // Erases elements within the `btree_set`. Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_set`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // btree_set::insert() + // + // Inserts an element of the specified value into the `btree_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. 
+ // + // std::pair insert(const value_type& value): + // + // Inserts a value into the `btree_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(value_type&& value): + // + // Inserts a moveable value into the `btree_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. 
+ using Base::emplace; + + // btree_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Any references, pointers, or iterators + // are invalidated. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_set::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. 
+ // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + + // btree_set::merge() + // + // Extracts elements from a given `source` btree_set into this + // `btree_set`. If the destination `btree_set` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_set::swap(btree_set& other) + // + // Exchanges the contents of this `btree_set` with those of the `other` + // btree_set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_set::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_set`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_set::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_set`. Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. 
+ using Base::count; + + // btree_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_set`. + using Base::equal_range; + + // btree_set::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_set::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element that is not less than `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_set::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_set::get_allocator() + // + // Returns the allocator function associated with this `btree_set`. + using Base::get_allocator; + + // btree_set::key_comp(); + // + // Returns the key comparator associated with this `btree_set`. + using Base::key_comp; + + // btree_set::value_comp(); + // + // Returns the value comparator associated with this `btree_set`. The keys to + // sort the elements are the values themselves, therefore `value_comp` and its + // sibling member function `key_comp` are equivalent. + using Base::value_comp; + }; + + // absl::swap(absl::btree_set<>, absl::btree_set<>) + // + // Swaps the contents of two `absl::btree_set` containers. 
+ template + void swap(btree_set& x, btree_set& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_set<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. + template + typename btree_set::size_type erase_if(btree_set& set, Pred pred) + { + return container_internal::btree_access::erase_if(set, std::move(pred)); + } + + // absl::btree_multiset<> + // + // An `absl::btree_multiset` is an ordered associative container of + // keys and associated values designed to be a more efficient replacement + // for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree + // multiset allows equivalent elements. + // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_multiset` uses a default allocator of `std::allocator` + // to allocate (and deallocate) nodes, and construct and destruct values within + // those nodes. You may instead specify a custom allocator `A` (which in turn + // requires specifying a custom comparator `C`) as in + // `absl::btree_multiset`. 
+ // + template, typename Alloc = std::allocator> + class btree_multiset : public container_internal::btree_multiset_container>> + { + using Base = typename btree_multiset::btree_multiset_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multiset` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multiset set1; + // + // * Initializer List constructor + // + // absl::btree_multiset set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_multiset set3(set2); + // + // * Copy assignment operator + // + // absl::btree_multiset set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multiset set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multiset set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_multiset set7(v.begin(), v.end()); + btree_multiset() + { + } + using Base::Base; + + // btree_multiset::begin() + // + // Returns an iterator to the beginning of the `btree_multiset`. + using Base::begin; + + // btree_multiset::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multiset`. + using Base::cbegin; + + // btree_multiset::end() + // + // Returns an iterator to the end of the `btree_multiset`. + using Base::end; + + // btree_multiset::cend() + // + // Returns a const iterator to the end of the `btree_multiset`. + using Base::cend; + + // btree_multiset::empty() + // + // Returns whether or not the `btree_multiset` is empty. + using Base::empty; + + // btree_multiset::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multiset` under current memory constraints. 
This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multiset`. + using Base::max_size; + + // btree_multiset::size() + // + // Returns the number of elements currently within the `btree_multiset`. + using Base::size; + + // btree_multiset::clear() + // + // Removes all elements from the `btree_multiset`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multiset::erase() + // + // Erases elements within the `btree_multiset`. Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multiset`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multiset::insert() + // + // Inserts an element of the specified value into the `btree_multiset`, + // returning an iterator pointing to the newly inserted element. + // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multiset`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multiset`, returning an iterator + // to the inserted element. 
+ // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multiset::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multiset`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multiset::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multiset`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multiset::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multiset` + // does not contain an element with a matching key, this function returns an + // empty node handle. 
+ // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_multiset::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. + // + // extract_and_get_next_return_type extract_and_get_next( + // const_iterator position): + // + // Extracts the element at the indicated position, returns a struct + // containing a member named `node`: a node handle owning that extracted + // data and a member named `next`: an iterator pointing to the next element + // in the btree. + using Base::extract_and_get_next; + + // btree_multiset::merge() + // + // Extracts all elements from a given `source` btree_multiset into this + // `btree_multiset`. + using Base::merge; + + // btree_multiset::swap(btree_multiset& other) + // + // Exchanges the contents of this `btree_multiset` with those of the `other` + // btree_multiset, avoiding invocation of any move, copy, or swap operations + // on individual elements. + // + // All iterators and references on the `btree_multiset` remain valid, + // excepting for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_multiset::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_multiset`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. 
+ using Base::contains; + + // btree_multiset::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_multiset::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_multiset`. + using Base::equal_range; + + // btree_multiset::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_multiset::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element that is not less than `key` within the + // `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multiset::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the + // `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_multiset::get_allocator() + // + // Returns the allocator function associated with this `btree_multiset`. + using Base::get_allocator; + + // btree_multiset::key_comp(); + // + // Returns the key comparator associated with this `btree_multiset`. 
+ using Base::key_comp; + + // btree_multiset::value_comp(); + // + // Returns the value comparator associated with this `btree_multiset`. The + // keys to sort the elements are the values themselves, therefore `value_comp` + // and its sibling member function `key_comp` are equivalent. + using Base::value_comp; + }; + + // absl::swap(absl::btree_multiset<>, absl::btree_multiset<>) + // + // Swaps the contents of two `absl::btree_multiset` containers. + template + void swap(btree_multiset& x, btree_multiset& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_multiset<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. + template + typename btree_multiset::size_type erase_if( + btree_multiset& set, Pred pred + ) + { + return container_internal::btree_access::erase_if(set, std::move(pred)); + } + + namespace container_internal + { + + // This type implements the necessary functions from the + // absl::container_internal::slot_type interface for btree_(multi)set. + template + struct set_slot_policy + { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type& element(slot_type* slot) + { + return *slot; + } + static const value_type& element(const slot_type* slot) + { + return *slot; + } + + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... 
args) + { + absl::allocator_traits::construct(*alloc, slot, std::forward(args)...); + } + + template + static void construct(Alloc* alloc, slot_type* slot, slot_type* other) + { + absl::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void construct(Alloc* alloc, slot_type* slot, const slot_type* other) + { + absl::allocator_traits::construct(*alloc, slot, *other); + } + + template + static void destroy(Alloc* alloc, slot_type* slot) + { + absl::allocator_traits::destroy(*alloc, slot); + } + }; + + // A parameters structure for holding the type parameters for a btree_set. + // Compare and Alloc should be nothrow copy-constructible. + template + struct set_params : common_params> + { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + + template + static const V& key(const V& value) + { + return value; + } + static const Key& key(const slot_type* slot) + { + return *slot; + } + static const Key& key(slot_type* slot) + { + return *slot; + } + }; + + } // namespace container_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/btree_test.h b/CAPI/cpp/grpc/include/absl/container/btree_test.h new file mode 100644 index 00000000..7739ea89 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/btree_test.h @@ -0,0 +1,215 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_BTREE_TEST_H_ +#define ABSL_CONTAINER_BTREE_TEST_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/flat_hash_set.h" +#include "absl/strings/cord.h" +#include "absl/time/time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Like remove_const but propagates the removal through std::pair. + template + struct remove_pair_const + { + using type = typename std::remove_const::type; + }; + template + struct remove_pair_const> + { + using type = std::pair::type, typename remove_pair_const::type>; + }; + + // Utility class to provide an accessor for a key given a value. The default + // behavior is to treat the value as a pair and return the first element. + template + struct KeyOfValue + { + struct type + { + const K& operator()(const V& p) const + { + return p.first; + } + }; + }; + + // Partial specialization of KeyOfValue class for when the key and value are + // the same type such as in set<> and btree_set<>. + template + struct KeyOfValue + { + struct type + { + const K& operator()(const K& k) const + { + return k; + } + }; + }; + + inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) + { + assert(val <= maxval); + constexpr unsigned kBase = 64; // avoid integer division. 
+ unsigned p = 15; + buf[p--] = 0; + while (maxval > 0) + { + buf[p--] = ' ' + (val % kBase); + val /= kBase; + maxval /= kBase; + } + return buf + p + 1; + } + + template + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + K operator()(int i) const + { + assert(i <= maxval); + return K(i); + } + }; + + template<> + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + absl::Time operator()(int i) const + { + return absl::FromUnixMillis(i); + } + }; + + template<> + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + std::string operator()(int i) const + { + char buf[16]; + return GenerateDigits(buf, i, maxval); + } + }; + + template<> + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + Cord operator()(int i) const + { + char buf[16]; + return Cord(GenerateDigits(buf, i, maxval)); + } + }; + + template + struct Generator> + { + Generator::type> tgen; + Generator::type> ugen; + + explicit Generator(int m) : + tgen(m), + ugen(m) + { + } + std::pair operator()(int i) const + { + return std::make_pair(tgen(i), ugen(i)); + } + }; + + // Generate n values for our tests and benchmarks. Value range is [0, maxval]. + inline std::vector GenerateNumbersWithSeed(int n, int maxval, int seed) + { + // NOTE: Some tests rely on generated numbers not changing between test runs. + // We use std::minstd_rand0 because it is well-defined, but don't use + // std::uniform_int_distribution because platforms use different algorithms. + std::minstd_rand0 rng(seed); + + std::vector values; + absl::flat_hash_set unique_values; + if (values.size() < n) + { + for (int i = values.size(); i < n; i++) + { + int value; + do + { + value = static_cast(rng()) % (maxval + 1); + } while (!unique_values.insert(value).second); + + values.push_back(value); + } + } + return values; + } + + // Generates n values in the range [0, maxval]. 
+ template + std::vector GenerateValuesWithSeed(int n, int maxval, int seed) + { + const std::vector nums = GenerateNumbersWithSeed(n, maxval, seed); + Generator gen(maxval); + std::vector vec; + + vec.reserve(n); + for (int i = 0; i < n; i++) + { + vec.push_back(gen(nums[i])); + } + + return vec; + } + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/fixed_array.h b/CAPI/cpp/grpc/include/absl/container/fixed_array.h new file mode 100644 index 00000000..71351285 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/fixed_array.h @@ -0,0 +1,663 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: fixed_array.h +// ----------------------------------------------------------------------------- +// +// A `FixedArray` represents a non-resizable array of `T` where the length of +// the array can be determined at run-time. It is a good replacement for +// non-standard and deprecated uses of `alloca()` and variable length arrays +// within the GCC extension. (See +// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html). +// +// `FixedArray` allocates small arrays inline, keeping performance fast by +// avoiding heap operations. 
It also helps reduce the chances of +// accidentally overflowing your stack if large input is passed to +// your function. + +#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_ +#define ABSL_CONTAINER_FIXED_ARRAY_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/config.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/memory/memory.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + constexpr static auto kFixedArrayUseDefault = static_cast(-1); + + // ----------------------------------------------------------------------------- + // FixedArray + // ----------------------------------------------------------------------------- + // + // A `FixedArray` provides a run-time fixed-size array, allocating a small array + // inline for efficiency. + // + // Most users should not specify the `N` template parameter and let `FixedArray` + // automatically determine the number of elements to store inline based on + // `sizeof(T)`. If `N` is specified, the `FixedArray` implementation will use + // inline storage for arrays with a length <= `N`. + // + // Note that a `FixedArray` constructed with a `size_type` argument will + // default-initialize its values by leaving trivially constructible types + // uninitialized (e.g. int, int[4], double), and others default-constructed. + // This matches the behavior of c-style arrays and `std::array`, but not + // `std::vector`. 
+ template> + class FixedArray + { + static_assert(!std::is_array::value || std::extent::value > 0, "Arrays with unknown bounds cannot be used with FixedArray."); + + static constexpr size_t kInlineBytesDefault = 256; + + using AllocatorTraits = std::allocator_traits; + // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, + // but this seems to be mostly pedantic. + template + using EnableIfForwardIterator = absl::enable_if_t::iterator_category, + std::forward_iterator_tag>::value>; + static constexpr bool NoexceptCopyable() + { + return std::is_nothrow_copy_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool NoexceptMovable() + { + return std::is_nothrow_move_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool DefaultConstructorIsNonTrivial() + { + return !absl::is_trivially_default_constructible::value; + } + + public: + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + static constexpr size_type inline_elements = + (N == kFixedArrayUseDefault ? 
kInlineBytesDefault / sizeof(value_type) : static_cast(N)); + + FixedArray(const FixedArray& other) noexcept(NoexceptCopyable()) : + FixedArray(other, AllocatorTraits::select_on_container_copy_construction(other.storage_.alloc())) + { + } + + FixedArray(const FixedArray& other, const allocator_type& a) noexcept(NoexceptCopyable()) : + FixedArray(other.begin(), other.end(), a) + { + } + + FixedArray(FixedArray&& other) noexcept(NoexceptMovable()) : + FixedArray(std::move(other), other.storage_.alloc()) + { + } + + FixedArray(FixedArray&& other, const allocator_type& a) noexcept(NoexceptMovable()) : + FixedArray(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), a) + { + } + + // Creates an array object that can store `n` elements. + // Note that trivially constructible elements will be uninitialized. + explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) : + storage_(n, a) + { + if (DefaultConstructorIsNonTrivial()) + { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), storage_.end()); + } + } + + // Creates an array initialized with `n` copies of `val`. + FixedArray(size_type n, const value_type& val, const allocator_type& a = allocator_type()) : + storage_(n, a) + { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), storage_.end(), val); + } + + // Creates an array initialized with the size and contents of `init_list`. + FixedArray(std::initializer_list init_list, const allocator_type& a = allocator_type()) : + FixedArray(init_list.begin(), init_list.end(), a) + { + } + + // Creates an array initialized with the elements from the input + // range. The array's size will always be `std::distance(first, last)`. + // REQUIRES: Iterator must be a forward_iterator or better. 
+ template* = nullptr> + FixedArray(Iterator first, Iterator last, const allocator_type& a = allocator_type()) : + storage_(std::distance(first, last), a) + { + memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last); + } + + ~FixedArray() noexcept + { + for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) + { + AllocatorTraits::destroy(storage_.alloc(), cur); + } + } + + // Assignments are deleted because they break the invariant that the size of a + // `FixedArray` never changes. + void operator=(FixedArray&&) = delete; + void operator=(const FixedArray&) = delete; + + // FixedArray::size() + // + // Returns the length of the fixed array. + size_type size() const + { + return storage_.size(); + } + + // FixedArray::max_size() + // + // Returns the largest possible value of `std::distance(begin(), end())` for a + // `FixedArray`. This is equivalent to the most possible addressable bytes + // over the number of bytes taken by T. + constexpr size_type max_size() const + { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + // FixedArray::empty() + // + // Returns whether or not the fixed array is empty. + bool empty() const + { + return size() == 0; + } + + // FixedArray::memsize() + // + // Returns the memory size of the fixed array in bytes. + size_t memsize() const + { + return size() * sizeof(value_type); + } + + // FixedArray::data() + // + // Returns a const T* pointer to elements of the `FixedArray`. This pointer + // can be used to access (but not modify) the contained elements. + const_pointer data() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return AsValueType(storage_.begin()); + } + + // Overload of FixedArray::data() to return a T* pointer to elements of the + // fixed array. This pointer can be used to access and modify the contained + // elements. 
+ pointer data() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return AsValueType(storage_.begin()); + } + + // FixedArray::operator[] + // + // Returns a reference the ith element of the fixed array. + // REQUIRES: 0 <= i < size() + reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // Overload of FixedArray::operator()[] to return a const reference to the + // ith element of the fixed array. + // REQUIRES: 0 <= i < size() + const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // FixedArray::at + // + // Bounds-checked access. Returns a reference to the ith element of the fixed + // array, or throws std::out_of_range + reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); + } + return data()[i]; + } + + // Overload of FixedArray::at() to return a const reference to the ith element + // of the fixed array. + const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); + } + return data()[i]; + } + + // FixedArray::front() + // + // Returns a reference to the first element of the fixed array. + reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // Overload of FixedArray::front() to return a reference to the first element + // of a fixed array of const values. + const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // FixedArray::back() + // + // Returns a reference to the last element of the fixed array. 
+ reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // Overload of FixedArray::back() to return a reference to the last element + // of a fixed array of const values. + const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // FixedArray::begin() + // + // Returns an iterator to the beginning of the fixed array. + iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data(); + } + + // Overload of FixedArray::begin() to return a const iterator to the + // beginning of the fixed array. + const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data(); + } + + // FixedArray::cbegin() + // + // Returns a const iterator to the beginning of the fixed array. + const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return begin(); + } + + // FixedArray::end() + // + // Returns an iterator to the end of the fixed array. + iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data() + size(); + } + + // Overload of FixedArray::end() to return a const iterator to the end of the + // fixed array. + const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data() + size(); + } + + // FixedArray::cend() + // + // Returns a const iterator to the end of the fixed array. + const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return end(); + } + + // FixedArray::rbegin() + // + // Returns a reverse iterator from the end of the fixed array. + reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return reverse_iterator(end()); + } + + // Overload of FixedArray::rbegin() to return a const reverse iterator from + // the end of the fixed array. + const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return const_reverse_iterator(end()); + } + + // FixedArray::crbegin() + // + // Returns a const reverse iterator from the end of the fixed array. 
+ const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return rbegin(); + } + + // FixedArray::rend() + // + // Returns a reverse iterator from the beginning of the fixed array. + reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return reverse_iterator(begin()); + } + + // Overload of FixedArray::rend() for returning a const reverse iterator + // from the beginning of the fixed array. + const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return const_reverse_iterator(begin()); + } + + // FixedArray::crend() + // + // Returns a reverse iterator from the beginning of the fixed array. + const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return rend(); + } + + // FixedArray::fill() + // + // Assigns the given `value` to all elements in the fixed array. + void fill(const value_type& val) + { + std::fill(begin(), end(), val); + } + + // Relational operators. Equality operators are elementwise using + // `operator==`, while order operators order FixedArrays lexicographically. 
+ friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) + { + return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) + { + return !(lhs == rhs); + } + + friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) + { + return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) + { + return rhs < lhs; + } + + friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) + { + return !(rhs < lhs); + } + + friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) + { + return !(lhs < rhs); + } + + template + friend H AbslHashValue(H h, const FixedArray& v) + { + return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), v.size()); + } + + private: + // StorageElement + // + // For FixedArrays with a C-style-array value_type, StorageElement is a POD + // wrapper struct called StorageElementWrapper that holds the value_type + // instance inside. This is needed for construction and destruction of the + // entire array regardless of how many dimensions it has. For all other cases, + // StorageElement is just an alias of value_type. + // + // Maintainer's Note: The simpler solution would be to simply wrap value_type + // in a struct whether it's an array or not. That causes some paranoid + // diagnostics to misfire, believing that 'data()' returns a pointer to a + // single element, rather than the packed array that it really is. + // e.g.: + // + // FixedArray buf(1); + // sprintf(buf.data(), "foo"); + // + // error: call to int __builtin___sprintf_chk(etc...) 
+ // will always overflow destination buffer [-Werror] + // + template, size_t InnerN = std::extent::value> + struct StorageElementWrapper + { + InnerT array[InnerN]; + }; + + using StorageElement = + absl::conditional_t::value, StorageElementWrapper, value_type>; + + static pointer AsValueType(pointer ptr) + { + return ptr; + } + static pointer AsValueType(StorageElementWrapper* ptr) + { + return std::addressof(ptr->array); + } + + static_assert(sizeof(StorageElement) == sizeof(value_type), ""); + static_assert(alignof(StorageElement) == alignof(value_type), ""); + + class NonEmptyInlinedStorage + { + public: + StorageElement* data() + { + return reinterpret_cast(buff_); + } + void AnnotateConstruct(size_type n); + void AnnotateDestruct(size_type n); + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + void* RedzoneBegin() + { + return &redzone_begin_; + } + void* RedzoneEnd() + { + return &redzone_end_ + 1; + } +#endif // ABSL_HAVE_ADDRESS_SANITIZER + + private: + ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_); + alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; + ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_); + }; + + class EmptyInlinedStorage + { + public: + StorageElement* data() + { + return nullptr; + } + void AnnotateConstruct(size_type) + { + } + void AnnotateDestruct(size_type) + { + } + }; + + using InlinedStorage = + absl::conditional_t; + + // Storage + // + // An instance of Storage manages the inline and out-of-line memory for + // instances of FixedArray. This guarantees that even when construction of + // individual elements fails in the FixedArray constructor body, the + // destructor for Storage will still be called and out-of-line memory will be + // properly deallocated. 
+ // + class Storage : public InlinedStorage + { + public: + Storage(size_type n, const allocator_type& a) : + size_alloc_(n, a), + data_(InitializeData()) + { + } + + ~Storage() noexcept + { + if (UsingInlinedStorage(size())) + { + InlinedStorage::AnnotateDestruct(size()); + } + else + { + AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); + } + } + + size_type size() const + { + return size_alloc_.template get<0>(); + } + StorageElement* begin() const + { + return data_; + } + StorageElement* end() const + { + return begin() + size(); + } + allocator_type& alloc() + { + return size_alloc_.template get<1>(); + } + const allocator_type& alloc() const + { + return size_alloc_.template get<1>(); + } + + private: + static bool UsingInlinedStorage(size_type n) + { + return n <= inline_elements; + } + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ABSL_ATTRIBUTE_NOINLINE +#endif // ABSL_HAVE_ADDRESS_SANITIZER + StorageElement* InitializeData() + { + if (UsingInlinedStorage(size())) + { + InlinedStorage::AnnotateConstruct(size()); + return InlinedStorage::data(); + } + else + { + return reinterpret_cast( + AllocatorTraits::allocate(alloc(), size()) + ); + } + } + + // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s + container_internal::CompressedTuple size_alloc_; + StorageElement* data_; + }; + + Storage storage_; + }; + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL + template + constexpr size_t FixedArray::kInlineBytesDefault; + + template + constexpr typename FixedArray::size_type + FixedArray::inline_elements; +#endif + + template + void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( + typename FixedArray::size_type n + ) + { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + if (!n) + return; + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER + static_cast(n); // Mark used 
when not in asan mode + } + + template + void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( + typename FixedArray::size_type n + ) + { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + if (!n) + return; + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd()); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode + } + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_FIXED_ARRAY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h b/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h new file mode 100644 index 00000000..2f0abc1c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h @@ -0,0 +1,638 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. 
However, +// `flat_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. + +#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_ +#define ABSL_CONTAINER_FLAT_HASH_MAP_H_ + +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + struct FlatHashMapPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::flat_hash_map + // ----------------------------------------------------------------------------- + // + // An `absl::flat_hash_map` is an unordered associative container which + // has been optimized for both speed and memory footprint in most common use + // cases. Its interface is similar to that of `std::unordered_map` with + // the following notable differences: + // + // * Requires keys that are CopyConstructible + // * Requires values that are MoveConstructible + // * Supports heterogeneous lookup, through `find()`, `operator[]()` and + // `insert()`, provided that the map is provided a compatible heterogeneous + // hashing function and equality operator. + // * Invalidates any references and pointers to elements within the table after + // `rehash()`. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash map. + // * Returns `void` from the `erase(iterator)` overload. 
+ // + // By default, `flat_hash_map` uses the `absl::Hash` hashing framework. + // All fundamental and Abseil types that support the `absl::Hash` framework have + // a compatible equality operator for comparing insertions into `flat_hash_map`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. + // + // Using `absl::flat_hash_map` at interface boundaries in dynamically loaded + // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. + // + // NOTE: A `flat_hash_map` stores its value types directly inside its + // implementation array to avoid memory indirection. Because a `flat_hash_map` + // is designed to move data when rehashed, map values will not retain pointer + // stability. If you require pointer stability, or if your values are large, + // consider using `absl::flat_hash_map>` instead. + // If your types are not moveable or you require pointer stability for keys, + // consider `absl::node_hash_map`. 
+ // + // Example: + // + // // Create a flat hash map of three strings (that map to strings) + // absl::flat_hash_map ducks = + // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; + // + // // Insert a new element into the flat hash map + // ducks.insert({"d", "donald"}); + // + // // Force a rehash of the flat hash map + // ducks.rehash(0); + // + // // Find the element with the key "b" + // std::string search_key = "b"; + // auto result = ducks.find(search_key); + // if (result != ducks.end()) { + // std::cout << "Result: " << result->second << std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Allocator = std::allocator>> + class flat_hash_map : public absl::container_internal::raw_hash_map, Hash, Eq, Allocator> + { + using Base = typename flat_hash_map::raw_hash_map; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_map supports the same overload set as `std::unordered_map` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. 
+ // absl::flat_hash_map map1; + // + // * Initializer List constructor + // + // absl::flat_hash_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_map map3(map2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::flat_hash_map map7(v.begin(), v.end()); + flat_hash_map() + { + } + using Base::Base; + + // flat_hash_map::begin() + // + // Returns an iterator to the beginning of the `flat_hash_map`. + using Base::begin; + + // flat_hash_map::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_map`. + using Base::cbegin; + + // flat_hash_map::cend() + // + // Returns a const iterator to the end of the `flat_hash_map`. + using Base::cend; + + // flat_hash_map::end() + // + // Returns an iterator to the end of the `flat_hash_map`. + using Base::end; + + // flat_hash_map::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_map`. + // + // NOTE: this member function is particular to `absl::flat_hash_map` and is + // not provided in the `std::unordered_map` API. + using Base::capacity; + + // flat_hash_map::empty() + // + // Returns whether or not the `flat_hash_map` is empty. + using Base::empty; + + // flat_hash_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_map` under current memory constraints. 
This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_map`. + using Base::max_size; + + // flat_hash_map::size() + // + // Returns the number of elements currently within the `flat_hash_map`. + using Base::size; + + // flat_hash_map::clear() + // + // Removes all elements from the `flat_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_map::erase() + // + // Erases elements within the `flat_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_map`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_map` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // map.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. The special case of calling + // `erase(begin(), end())` resets the reserved growth such that if + // `reserve(N)` has previously been called and there has been no intervening + // call to `clear()`, then after calling `erase(begin(), end())`, it is safe + // to assume that inserting N elements will not cause a rehash. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). 
+ using Base::erase; + + // flat_hash_map::insert() + // + // Inserts an element of the specified value into the `flat_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const init_type& value): + // + // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // std::pair insert(init_type&& value): + // + // Inserts a moveable value into the `flat_hash_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. 
+ // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `flat_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due + // to the insertion, all existing iterators are invalidated. Overloads are + // listed below. + // + // pair insert_or_assign(const init_type& k, T&& obj): + // pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // flat_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. 
+ using Base::emplace; + + // flat_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // pair try_emplace(const key_type& k, Args&&... args): + // pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. 
+ // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // flat_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `flat_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + using Base::extract; + + // flat_hash_map::merge() + // + // Extracts elements from a given `source` flat hash map into this + // `flat_hash_map`. If the destination `flat_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_map::swap(flat_hash_map& other) + // + // Exchanges the contents of this `flat_hash_map` with those of the `other` + // flat hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. 
+ // + // `swap()` requires that the flat hash map's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_map::rehash(count) + // + // Rehashes the `flat_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_map`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // flat_hash_map::reserve(count) + // + // Sets the number of slots in the `flat_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // flat_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `flat_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // flat_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `flat_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `flat_hash_map`. 
+ using Base::count; + + // flat_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_map`. + using Base::equal_range; + + // flat_hash_map::find() + // + // Finds an element with the passed `key` within the `flat_hash_map`. + using Base::find; + + // flat_hash_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `flat_hash_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs and results in a rehashing of the container, all + // iterators are invalidated. Otherwise iterators are not affected and + // references are not invalidated. Overloads are listed below. + // + // T& operator[](const Key& key): + // + // Inserts an init_type object constructed in-place if the element with the + // given key does not exist. + // + // T& operator[](Key&& key): + // + // Inserts an init_type object constructed in-place provided that an element + // with the given key does not exist. + using Base::operator[]; + + // flat_hash_map::bucket_count() + // + // Returns the number of "buckets" within the `flat_hash_map`. Note that + // because a flat hash map contains all elements within its internal storage, + // this value simply equals the current capacity of the `flat_hash_map`. + using Base::bucket_count; + + // flat_hash_map::load_factor() + // + // Returns the current load factor of the `flat_hash_map` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // flat_hash_map::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_map`. Overloads are + // listed below. + // + // float flat_hash_map::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_map`. 
+ // + // void flat_hash_map::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_map` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_map` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_map::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_map`. + using Base::get_allocator; + + // flat_hash_map::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `flat_hash_map`. + using Base::hash_function; + + // flat_hash_map::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(flat_hash_map<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename flat_hash_map::size_type erase_if( + flat_hash_map& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + struct FlatHashMapPolicy + { + using slot_policy = container_internal::map_slot_policy; + using slot_type = typename slot_policy::slot_type; + using key_type = K; + using mapped_type = V; + using init_type = std::pair; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) + { + slot_policy::construct(alloc, slot, std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) + { + slot_policy::destroy(alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) + { + slot_policy::transfer(alloc, new_slot, old_slot); + } + + template + static decltype(absl::container_internal::DecomposePair( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... 
args) + { + return absl::container_internal::DecomposePair(std::forward(f), std::forward(args)...); + } + + static size_t space_used(const slot_type*) + { + return 0; + } + + static std::pair& element(slot_type* slot) + { + return slot->value; + } + + static V& value(std::pair* kv) + { + return kv->second; + } + static const V& value(const std::pair* kv) + { + return kv->second; + } + }; + + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer< + absl::flat_hash_map> : std::true_type + { + }; + + } // namespace container_algorithm_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h b/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h new file mode 100644 index 00000000..5c2cd2fd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h @@ -0,0 +1,524 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_set.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_set` is an unordered associative container designed to +// be a more efficient replacement for `std::unordered_set`. 
Like +// `unordered_set`, search, insertion, and deletion of set elements can be done +// as an `O(1)` operation. However, `flat_hash_set` (and other unordered +// associative containers known as the collection of Abseil "Swiss tables") +// contain other optimizations that result in both memory and computation +// advantages. +// +// In most cases, your default choice for a hash set should be a set of type +// `flat_hash_set`. +#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_ +#define ABSL_CONTAINER_FLAT_HASH_SET_H_ + +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + struct FlatHashSetPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::flat_hash_set + // ----------------------------------------------------------------------------- + // + // An `absl::flat_hash_set` is an unordered associative container which has + // been optimized for both speed and memory footprint in most common use cases. + // Its interface is similar to that of `std::unordered_set` with the + // following notable differences: + // + // * Requires keys that are CopyConstructible + // * Supports heterogeneous lookup, through `find()` and `insert()`, provided + // that the set is provided a compatible heterogeneous hashing function and + // equality operator. + // * Invalidates any references and pointers to elements within the table after + // `rehash()`. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash set. 
+ // * Returns `void` from the `erase(iterator)` overload. + // + // By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All + // fundamental and Abseil types that support the `absl::Hash` framework have a + // compatible equality operator for comparing insertions into `flat_hash_set`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. + // + // Using `absl::flat_hash_set` at interface boundaries in dynamically loaded + // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. + // + // NOTE: A `flat_hash_set` stores its keys directly inside its implementation + // array to avoid memory indirection. Because a `flat_hash_set` is designed to + // move data when rehashed, set keys will not retain pointer stability. If you + // require pointer stability, consider using + // `absl::flat_hash_set>`. If your type is not moveable and + // you require pointer stability, consider `absl::node_hash_set` instead. + // + // Example: + // + // // Create a flat hash set of three strings + // absl::flat_hash_set ducks = + // {"huey", "dewey", "louie"}; + // + // // Insert a new element into the flat hash set + // ducks.insert("donald"); + // + // // Force a rehash of the flat hash set + // ducks.rehash(0); + // + // // See if "dewey" is present + // if (ducks.contains("dewey")) { + // std::cout << "We found dewey!" 
<< std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Allocator = std::allocator> + class flat_hash_set : public absl::container_internal::raw_hash_set, Hash, Eq, Allocator> + { + using Base = typename flat_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_set supports the same overload set as `std::unordered_set` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::flat_hash_set set1; + // + // * Initializer List constructor + // + // absl::flat_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::flat_hash_set set7(v.begin(), v.end()); + flat_hash_set() + { + } + using Base::Base; + + // flat_hash_set::begin() + // + // Returns an iterator to the beginning of the `flat_hash_set`. + using Base::begin; + + // flat_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_set`. + using Base::cbegin; + + // flat_hash_set::cend() + // + // Returns a const iterator to the end of the `flat_hash_set`. + using Base::cend; + + // flat_hash_set::end() + // + // Returns an iterator to the end of the `flat_hash_set`. + using Base::end; + + // flat_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_set`. 
+ // + // NOTE: this member function is particular to `absl::flat_hash_set` and is + // not provided in the `std::unordered_set` API. + using Base::capacity; + + // flat_hash_set::empty() + // + // Returns whether or not the `flat_hash_set` is empty. + using Base::empty; + + // flat_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_set`. + using Base::max_size; + + // flat_hash_set::size() + // + // Returns the number of elements currently within the `flat_hash_set`. + using Base::size; + + // flat_hash_set::clear() + // + // Removes all elements from the `flat_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_set::erase() + // + // Erases elements within the `flat_hash_set`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_set`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_set` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // set.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. 
The special case of calling + // `erase(begin(), end())` resets the reserved growth such that if + // `reserve(N)` has previously been called and there has been no intervening + // call to `clear()`, then after calling `erase(begin(), end())`, it is safe + // to assume that inserting N elements will not cause a rehash. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // flat_hash_set::insert() + // + // Inserts an element of the specified value into the `flat_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `flat_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). 
+ // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. 
+ // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `flat_hash_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. + using Base::extract; + + // flat_hash_set::merge() + // + // Extracts elements from a given `source` flat hash set into this + // `flat_hash_set`. If the destination `flat_hash_set` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_set::swap(flat_hash_set& other) + // + // Exchanges the contents of this `flat_hash_set` with those of the `other` + // flat hash set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the flat hash set's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the set's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_set::rehash(count) + // + // Rehashes the `flat_hash_set`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. 
+ // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_set`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // flat_hash_set::reserve(count) + // + // Sets the number of slots in the `flat_hash_set` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_set::contains() + // + // Determines whether an element comparing equal to the given `key` exists + // within the `flat_hash_set`, returning `true` if so or `false` otherwise. + using Base::contains; + + // flat_hash_set::count(const Key& key) const + // + // Returns the number of elements comparing equal to the given `key` within + // the `flat_hash_set`. note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `flat_hash_set`. + using Base::count; + + // flat_hash_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_set`. + using Base::equal_range; + + // flat_hash_set::find() + // + // Finds an element with the passed `key` within the `flat_hash_set`. + using Base::find; + + // flat_hash_set::bucket_count() + // + // Returns the number of "buckets" within the `flat_hash_set`. Note that + // because a flat hash set contains all elements within its internal storage, + // this value simply equals the current capacity of the `flat_hash_set`. + using Base::bucket_count; + + // flat_hash_set::load_factor() + // + // Returns the current load factor of the `flat_hash_set` (the average number + // of slots occupied with a value within the hash set). + using Base::load_factor; + + // flat_hash_set::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_set`. Overloads are + // listed below. 
+ // + // float flat_hash_set::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_set`. + // + // void flat_hash_set::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_set` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_set` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_set::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_set`. + using Base::get_allocator; + + // flat_hash_set::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `flat_hash_set`. + using Base::hash_function; + + // flat_hash_set::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(flat_hash_set<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename flat_hash_set::size_type erase_if( + flat_hash_set& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + struct FlatHashSetPolicy + { + using slot_type = T; + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) + { + absl::allocator_traits::construct(*alloc, slot, std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) + { + absl::allocator_traits::destroy(*alloc, slot); + } + + static T& element(slot_type* slot) + { + return *slot; + } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... 
args) + { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)... + ); + } + + static size_t space_used(const T*) + { + return 0; + } + }; + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer> : std::true_type + { + }; + + } // namespace container_algorithm_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/inlined_vector.h b/CAPI/cpp/grpc/include/absl/container/inlined_vector.h new file mode 100644 index 00000000..98036bd7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/inlined_vector.h @@ -0,0 +1,1097 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: inlined_vector.h +// ----------------------------------------------------------------------------- +// +// This header file contains the declaration and definition of an "inlined +// vector" which behaves in an equivalent fashion to a `std::vector`, except +// that storage for small sequences of the vector are provided inline without +// requiring any heap allocation. +// +// An `absl::InlinedVector` specifies the default capacity `N` as one of +// its template parameters. 
Instances where `size() <= N` hold contained +// elements in inline space. Typically `N` is very small so that sequences that +// are expected to be short do not require allocations. +// +// An `absl::InlinedVector` does not usually require a specific allocator. If +// the inlined vector grows beyond its initial constraints, it will need to +// allocate (as any normal `std::vector` would). This is usually performed with +// the default allocator (defined as `std::allocator`). Optionally, a custom +// allocator type may be specified as `A` in `absl::InlinedVector`. + +#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_ +#define ABSL_CONTAINER_INLINED_VECTOR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/container/internal/inlined_vector.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + // ----------------------------------------------------------------------------- + // InlinedVector + // ----------------------------------------------------------------------------- + // + // An `absl::InlinedVector` is designed to be a drop-in replacement for + // `std::vector` for use cases where the vector's size is sufficiently small + // that it can be inlined. If the inlined vector does grow beyond its estimated + // capacity, it will trigger an initial allocation on the heap, and will behave + // as a `std::vector`. The API of the `absl::InlinedVector` within this file is + // designed to cover the same API footprint as covered by `std::vector`. 
+ template> + class InlinedVector + { + static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); + + using Storage = inlined_vector_internal::Storage; + + template + using AllocatorTraits = inlined_vector_internal::AllocatorTraits; + template + using MoveIterator = inlined_vector_internal::MoveIterator; + template + using IsMoveAssignOk = inlined_vector_internal::IsMoveAssignOk; + + template + using IteratorValueAdapter = + inlined_vector_internal::IteratorValueAdapter; + template + using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; + template + using DefaultValueAdapter = + inlined_vector_internal::DefaultValueAdapter; + + template + using EnableIfAtLeastForwardIterator = absl::enable_if_t< + inlined_vector_internal::IsAtLeastForwardIterator::value, + int>; + template + using DisableIfAtLeastForwardIterator = absl::enable_if_t< + !inlined_vector_internal::IsAtLeastForwardIterator::value, + int>; + + using MemcpyPolicy = typename Storage::MemcpyPolicy; + using ElementwiseAssignPolicy = typename Storage::ElementwiseAssignPolicy; + using ElementwiseConstructPolicy = + typename Storage::ElementwiseConstructPolicy; + using MoveAssignmentPolicy = typename Storage::MoveAssignmentPolicy; + + public: + using allocator_type = A; + using value_type = inlined_vector_internal::ValueType; + using pointer = inlined_vector_internal::Pointer; + using const_pointer = inlined_vector_internal::ConstPointer; + using size_type = inlined_vector_internal::SizeType; + using difference_type = inlined_vector_internal::DifferenceType; + using reference = inlined_vector_internal::Reference; + using const_reference = inlined_vector_internal::ConstReference; + using iterator = inlined_vector_internal::Iterator; + using const_iterator = inlined_vector_internal::ConstIterator; + using reverse_iterator = inlined_vector_internal::ReverseIterator; + using const_reverse_iterator = + inlined_vector_internal::ConstReverseIterator; + + // 
--------------------------------------------------------------------------- + // InlinedVector Constructors and Destructor + // --------------------------------------------------------------------------- + + // Creates an empty inlined vector with a value-initialized allocator. + InlinedVector() noexcept(noexcept(allocator_type())) : + storage_() + { + } + + // Creates an empty inlined vector with a copy of `allocator`. + explicit InlinedVector(const allocator_type& allocator) noexcept + : + storage_(allocator) + { + } + + // Creates an inlined vector with `n` copies of `value_type()`. + explicit InlinedVector(size_type n, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + storage_.Initialize(DefaultValueAdapter(), n); + } + + // Creates an inlined vector with `n` copies of `v`. + InlinedVector(size_type n, const_reference v, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); + } + + // Creates an inlined vector with copies of the elements of `list`. + InlinedVector(std::initializer_list list, const allocator_type& allocator = allocator_type()) : + InlinedVector(list.begin(), list.end(), allocator) + { + } + + // Creates an inlined vector with elements constructed from the provided + // forward iterator range [`first`, `last`). + // + // NOTE: the `enable_if` prevents ambiguous interpretation between a call to + // this constructor with two integral arguments and a call to the above + // `InlinedVector(size_type, const_reference)` constructor. + template = 0> + InlinedVector(ForwardIterator first, ForwardIterator last, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + storage_.Initialize(IteratorValueAdapter(first), static_cast(std::distance(first, last))); + } + + // Creates an inlined vector with elements constructed from the provided input + // iterator range [`first`, `last`). 
+ template = 0> + InlinedVector(InputIterator first, InputIterator last, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + std::copy(first, last, std::back_inserter(*this)); + } + + // Creates an inlined vector by copying the contents of `other` using + // `other`'s allocator. + InlinedVector(const InlinedVector& other) : + InlinedVector(other, other.storage_.GetAllocator()) + { + } + + // Creates an inlined vector by copying the contents of `other` using the + // provided `allocator`. + InlinedVector(const InlinedVector& other, const allocator_type& allocator) : + storage_(allocator) + { + // Fast path: if the other vector is empty, there's nothing for us to do. + if (other.empty()) + { + return; + } + + // Fast path: if the value type is trivially copy constructible, we know the + // allocator doesn't do anything fancy, and there is nothing on the heap + // then we know it is legal for us to simply memcpy the other vector's + // inlined bytes to form our copy of its elements. + if (absl::is_trivially_copy_constructible::value && + std::is_same>::value && + !other.storage_.GetIsAllocated()) + { + storage_.MemcpyFrom(other.storage_); + return; + } + + storage_.InitFrom(other.storage_); + } + + // Creates an inlined vector by moving in the contents of `other` without + // allocating. If `other` contains allocated memory, the newly-created inlined + // vector will take ownership of that memory. However, if `other` does not + // contain allocated memory, the newly-created inlined vector will perform + // element-wise move construction of the contents of `other`. + // + // NOTE: since no allocation is performed for the inlined vector in either + // case, the `noexcept(...)` specification depends on whether moving the + // underlying objects can throw. It is assumed assumed that... + // a) move constructors should only throw due to allocation failure. 
+ // b) if `value_type`'s move constructor allocates, it uses the same + // allocation function as the inlined vector's allocator. + // Thus, the move constructor is non-throwing if the allocator is non-throwing + // or `value_type`'s move constructor is specified as `noexcept`. + InlinedVector(InlinedVector&& other) noexcept( + absl::allocator_is_nothrow::value || + std::is_nothrow_move_constructible::value + ) : + storage_(other.storage_.GetAllocator()) + { + // Fast path: if the value type can be trivially relocated (i.e. moved from + // and destroyed), and we know the allocator doesn't do anything fancy, then + // it's safe for us to simply adopt the contents of the storage for `other` + // and remove its own reference to them. It's as if we had individually + // move-constructed each value and then destroyed the original. + if (absl::is_trivially_relocatable::value && + std::is_same>::value) + { + storage_.MemcpyFrom(other.storage_); + other.storage_.SetInlinedSize(0); + return; + } + + // Fast path: if the other vector is on the heap, we can simply take over + // its allocation. + if (other.storage_.GetIsAllocated()) + { + storage_.SetAllocation({other.storage_.GetAllocatedData(), other.storage_.GetAllocatedCapacity()}); + storage_.SetAllocatedSize(other.storage_.GetSize()); + + other.storage_.SetInlinedSize(0); + return; + } + + // Otherwise we must move each element individually. + IteratorValueAdapter> other_values( + MoveIterator(other.storage_.GetInlinedData()) + ); + + inlined_vector_internal::ConstructElements( + storage_.GetAllocator(), storage_.GetInlinedData(), other_values, other.storage_.GetSize() + ); + + storage_.SetInlinedSize(other.storage_.GetSize()); + } + + // Creates an inlined vector by moving in the contents of `other` with a copy + // of `allocator`. + // + // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other` + // contains allocated memory, this move constructor will still allocate. 
Since + // allocation is performed, this constructor can only be `noexcept` if the + // specified allocator is also `noexcept`. + InlinedVector( + InlinedVector&& other, + const allocator_type& + allocator + ) noexcept(absl::allocator_is_nothrow::value) : + storage_(allocator) + { + // Fast path: if the value type can be trivially relocated (i.e. moved from + // and destroyed), and we know the allocator doesn't do anything fancy, then + // it's safe for us to simply adopt the contents of the storage for `other` + // and remove its own reference to them. It's as if we had individually + // move-constructed each value and then destroyed the original. + if (absl::is_trivially_relocatable::value && + std::is_same>::value) + { + storage_.MemcpyFrom(other.storage_); + other.storage_.SetInlinedSize(0); + return; + } + + // Fast path: if the other vector is on the heap and shared the same + // allocator, we can simply take over its allocation. + if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && + other.storage_.GetIsAllocated()) + { + storage_.SetAllocation({other.storage_.GetAllocatedData(), other.storage_.GetAllocatedCapacity()}); + storage_.SetAllocatedSize(other.storage_.GetSize()); + + other.storage_.SetInlinedSize(0); + return; + } + + // Otherwise we must move each element individually. + storage_.Initialize( + IteratorValueAdapter>(MoveIterator(other.data())), + other.size() + ); + } + + ~InlinedVector() + { + } + + // --------------------------------------------------------------------------- + // InlinedVector Member Accessors + // --------------------------------------------------------------------------- + + // `InlinedVector::empty()` + // + // Returns whether the inlined vector contains no elements. + bool empty() const noexcept + { + return !size(); + } + + // `InlinedVector::size()` + // + // Returns the number of elements in the inlined vector. 
+ size_type size() const noexcept + { + return storage_.GetSize(); + } + + // `InlinedVector::max_size()` + // + // Returns the maximum number of elements the inlined vector can hold. + size_type max_size() const noexcept + { + // One bit of the size storage is used to indicate whether the inlined + // vector contains allocated memory. As a result, the maximum size that the + // inlined vector can express is the minimum of the limit of how many + // objects we can allocate and std::numeric_limits::max() / 2. + return (std::min)(AllocatorTraits::max_size(storage_.GetAllocator()), (std::numeric_limits::max)() / 2); + } + + // `InlinedVector::capacity()` + // + // Returns the number of elements that could be stored in the inlined vector + // without requiring a reallocation. + // + // NOTE: for most inlined vectors, `capacity()` should be equal to the + // template parameter `N`. For inlined vectors which exceed this capacity, + // they will no longer be inlined and `capacity()` will equal the capactity of + // the allocated memory. + size_type capacity() const noexcept + { + return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity() : storage_.GetInlinedCapacity(); + } + + // `InlinedVector::data()` + // + // Returns a `pointer` to the elements of the inlined vector. This pointer + // can be used to access and modify the contained elements. + // + // NOTE: only elements within [`data()`, `data() + size()`) are valid. + pointer data() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return storage_.GetIsAllocated() ? storage_.GetAllocatedData() : storage_.GetInlinedData(); + } + + // Overload of `InlinedVector::data()` that returns a `const_pointer` to the + // elements of the inlined vector. This pointer can be used to access but not + // modify the contained elements. + // + // NOTE: only elements within [`data()`, `data() + size()`) are valid. + const_pointer data() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return storage_.GetIsAllocated() ? 
storage_.GetAllocatedData() : storage_.GetInlinedData(); + } + + // `InlinedVector::operator[](...)` + // + // Returns a `reference` to the `i`th element of the inlined vector. + reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // Overload of `InlinedVector::operator[](...)` that returns a + // `const_reference` to the `i`th element of the inlined vector. + const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // `InlinedVector::at(...)` + // + // Returns a `reference` to the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type)` failed bounds check" + ); + } + return data()[i]; + } + + // Overload of `InlinedVector::at(...)` that returns a `const_reference` to + // the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type) const` failed bounds check" + ); + } + return data()[i]; + } + + // `InlinedVector::front()` + // + // Returns a `reference` to the first element of the inlined vector. + reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // Overload of `InlinedVector::front()` that returns a `const_reference` to + // the first element of the inlined vector. 
+ const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // `InlinedVector::back()` + // + // Returns a `reference` to the last element of the inlined vector. + reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // Overload of `InlinedVector::back()` that returns a `const_reference` to the + // last element of the inlined vector. + const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // `InlinedVector::begin()` + // + // Returns an `iterator` to the beginning of the inlined vector. + iterator begin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data(); + } + + // Overload of `InlinedVector::begin()` that returns a `const_iterator` to + // the beginning of the inlined vector. + const_iterator begin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data(); + } + + // `InlinedVector::end()` + // + // Returns an `iterator` to the end of the inlined vector. + iterator end() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data() + size(); + } + + // Overload of `InlinedVector::end()` that returns a `const_iterator` to the + // end of the inlined vector. + const_iterator end() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return data() + size(); + } + + // `InlinedVector::cbegin()` + // + // Returns a `const_iterator` to the beginning of the inlined vector. + const_iterator cbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return begin(); + } + + // `InlinedVector::cend()` + // + // Returns a `const_iterator` to the end of the inlined vector. + const_iterator cend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return end(); + } + + // `InlinedVector::rbegin()` + // + // Returns a `reverse_iterator` from the end of the inlined vector. 
+ reverse_iterator rbegin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return reverse_iterator(end()); + } + + // Overload of `InlinedVector::rbegin()` that returns a + // `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator rbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return const_reverse_iterator(end()); + } + + // `InlinedVector::rend()` + // + // Returns a `reverse_iterator` from the beginning of the inlined vector. + reverse_iterator rend() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return reverse_iterator(begin()); + } + + // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator` + // from the beginning of the inlined vector. + const_reverse_iterator rend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return const_reverse_iterator(begin()); + } + + // `InlinedVector::crbegin()` + // + // Returns a `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator crbegin() const noexcept + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return rbegin(); + } + + // `InlinedVector::crend()` + // + // Returns a `const_reverse_iterator` from the beginning of the inlined + // vector. + const_reverse_iterator crend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return rend(); + } + + // `InlinedVector::get_allocator()` + // + // Returns a copy of the inlined vector's allocator. + allocator_type get_allocator() const + { + return storage_.GetAllocator(); + } + + // --------------------------------------------------------------------------- + // InlinedVector Member Mutators + // --------------------------------------------------------------------------- + + // `InlinedVector::operator=(...)` + // + // Replaces the elements of the inlined vector with copies of the elements of + // `list`. 
+ InlinedVector& operator=(std::initializer_list list) + { + assign(list.begin(), list.end()); + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that replaces the elements of + // the inlined vector with copies of the elements of `other`. + InlinedVector& operator=(const InlinedVector& other) + { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) + { + const_pointer other_data = other.data(); + assign(other_data, other_data + other.size()); + } + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that moves the elements of + // `other` into the inlined vector. + // + // NOTE: as a result of calling this overload, `other` is left in a valid but + // unspecified state. + InlinedVector& operator=(InlinedVector&& other) + { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) + { + MoveAssignment(MoveAssignmentPolicy{}, std::move(other)); + } + + return *this; + } + + // `InlinedVector::assign(...)` + // + // Replaces the contents of the inlined vector with `n` copies of `v`. + void assign(size_type n, const_reference v) + { + storage_.Assign(CopyValueAdapter(std::addressof(v)), n); + } + + // Overload of `InlinedVector::assign(...)` that replaces the contents of the + // inlined vector with copies of the elements of `list`. + void assign(std::initializer_list list) + { + assign(list.begin(), list.end()); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "forward" category or better. + template = 0> + void assign(ForwardIterator first, ForwardIterator last) + { + storage_.Assign(IteratorValueAdapter(first), static_cast(std::distance(first, last))); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "input" category. 
+ template = 0> + void assign(InputIterator first, InputIterator last) + { + size_type i = 0; + for (; i < size() && first != last; ++i, static_cast(++first)) + { + data()[i] = *first; + } + + erase(data() + i, data() + size()); + std::copy(first, last, std::back_inserter(*this)); + } + + // `InlinedVector::resize(...)` + // + // Resizes the inlined vector to contain `n` elements. + // + // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are value-initialized. + void resize(size_type n) + { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(DefaultValueAdapter(), n); + } + + // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to + // contain `n` elements. + // + // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are copied-constructed from `v`. + void resize(size_type n, const_reference v) + { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(CopyValueAdapter(std::addressof(v)), n); + } + + // `InlinedVector::insert(...)` + // + // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly + // inserted element. + iterator insert(const_iterator pos, const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return emplace(pos, v); + } + + // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using + // move semantics, returning an `iterator` to the newly inserted element. + iterator insert(const_iterator pos, value_type&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return emplace(pos, std::move(v)); + } + + // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies + // of `v` starting at `pos`, returning an `iterator` pointing to the first of + // the newly inserted elements. 
+ iterator insert(const_iterator pos, size_type n, const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + if (ABSL_PREDICT_TRUE(n != 0)) + { + value_type dealias = v; + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), n); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } + else + { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts copies of the + // elements of `list` starting at `pos`, returning an `iterator` pointing to + // the first of the newly inserted elements. + iterator insert(const_iterator pos, std::initializer_list list) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert(pos, list.begin(), list.end()); + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "forward" category or better. 
+ template = 0> + iterator insert(const_iterator pos, ForwardIterator first, ForwardIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + if (ABSL_PREDICT_TRUE(first != last)) + { + return storage_.Insert( + pos, IteratorValueAdapter(first), static_cast(std::distance(first, last)) + ); + } + else + { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "input" category. + template = 0> + iterator insert(const_iterator pos, InputIterator first, InputIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + size_type index = static_cast(std::distance(cbegin(), pos)); + for (size_type i = index; first != last; ++i, static_cast(++first)) + { + insert(data() + i, *first); + } + + return iterator(data() + index); + } + + // `InlinedVector::emplace(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `pos`, returning an `iterator` pointing to the newly emplaced element. + template + iterator emplace(const_iterator pos, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + value_type dealias(std::forward(args)...); + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. 
+#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return storage_.Insert(pos, IteratorValueAdapter>(MoveIterator(std::addressof(dealias))), 1); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } + + // `InlinedVector::emplace_back(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `end()`, returning a `reference` to the newly emplaced element. + template + reference emplace_back(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return storage_.EmplaceBack(std::forward(args)...); + } + + // `InlinedVector::push_back(...)` + // + // Inserts a copy of `v` in the inlined vector at `end()`. + void push_back(const_reference v) + { + static_cast(emplace_back(v)); + } + + // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` + // using move semantics. + void push_back(value_type&& v) + { + static_cast(emplace_back(std::move(v))); + } + + // `InlinedVector::pop_back()` + // + // Destroys the element at `back()`, reducing the size by `1`. + void pop_back() noexcept + { + ABSL_HARDENING_ASSERT(!empty()); + + AllocatorTraits::destroy(storage_.GetAllocator(), data() + (size() - 1)); + storage_.SubtractSize(1); + } + + // `InlinedVector::erase(...)` + // + // Erases the element at `pos`, returning an `iterator` pointing to where the + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferenceable. + iterator erase(const_iterator pos) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos < end()); + + return storage_.Erase(pos, pos + 1); + } + + // Overload of `InlinedVector::erase(...)` that erases every element in the + // range [`from`, `to`), returning an `iterator` pointing to where the first + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferenceable. 
+ iterator erase(const_iterator from, const_iterator to) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(from >= begin()); + ABSL_HARDENING_ASSERT(from <= to); + ABSL_HARDENING_ASSERT(to <= end()); + + if (ABSL_PREDICT_TRUE(from != to)) + { + return storage_.Erase(from, to); + } + else + { + return const_cast(from); + } + } + + // `InlinedVector::clear()` + // + // Destroys all elements in the inlined vector, setting the size to `0` and + // deallocating any held memory. + void clear() noexcept + { + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size() + ); + storage_.DeallocateIfAllocated(); + + storage_.SetInlinedSize(0); + } + + // `InlinedVector::reserve(...)` + // + // Ensures that there is enough room for at least `n` elements. + void reserve(size_type n) + { + storage_.Reserve(n); + } + + // `InlinedVector::shrink_to_fit()` + // + // Attempts to reduce memory usage by moving elements to (or keeping elements + // in) the smallest available buffer sufficient for containing `size()` + // elements. + // + // If `size()` is sufficiently small, the elements will be moved into (or kept + // in) the inlined space. + void shrink_to_fit() + { + if (storage_.GetIsAllocated()) + { + storage_.ShrinkToFit(); + } + } + + // `InlinedVector::swap(...)` + // + // Swaps the contents of the inlined vector with `other`. + void swap(InlinedVector& other) + { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) + { + storage_.Swap(std::addressof(other.storage_)); + } + } + + private: + template + friend H AbslHashValue(H h, const absl::InlinedVector& a); + + void MoveAssignment(MemcpyPolicy, InlinedVector&& other) + { + // Assumption check: we shouldn't be told to use memcpy to implement move + // assignment unless we have trivially destructible elements and an + // allocator that does nothing fancy. 
+ static_assert(absl::is_trivially_destructible::value, ""); + static_assert(std::is_same>::value, ""); + + // Throw away our existing heap allocation, if any. There is no need to + // destroy the existing elements one by one because we know they are + // trivially destructible. + storage_.DeallocateIfAllocated(); + + // Adopt the other vector's inline elements or heap allocation. + storage_.MemcpyFrom(other.storage_); + other.storage_.SetInlinedSize(0); + } + + // Destroy our existing elements, if any, and adopt the heap-allocated + // elements of the other vector. + // + // REQUIRES: other.storage_.GetIsAllocated() + void DestroyExistingAndAdopt(InlinedVector&& other) + { + ABSL_HARDENING_ASSERT(other.storage_.GetIsAllocated()); + + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size() + ); + storage_.DeallocateIfAllocated(); + + storage_.MemcpyFrom(other.storage_); + other.storage_.SetInlinedSize(0); + } + + void MoveAssignment(ElementwiseAssignPolicy, InlinedVector&& other) + { + // Fast path: if the other vector is on the heap then we don't worry about + // actually move-assigning each element. Instead we only throw away our own + // existing elements and adopt the heap allocation of the other vector. + if (other.storage_.GetIsAllocated()) + { + DestroyExistingAndAdopt(std::move(other)); + return; + } + + storage_.Assign(IteratorValueAdapter>(MoveIterator(other.storage_.GetInlinedData())), other.size()); + } + + void MoveAssignment(ElementwiseConstructPolicy, InlinedVector&& other) + { + // Fast path: if the other vector is on the heap then we don't worry about + // actually move-assigning each element. Instead we only throw away our own + // existing elements and adopt the heap allocation of the other vector. 
+ if (other.storage_.GetIsAllocated()) + { + DestroyExistingAndAdopt(std::move(other)); + return; + } + + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size() + ); + storage_.DeallocateIfAllocated(); + + IteratorValueAdapter> other_values( + MoveIterator(other.storage_.GetInlinedData()) + ); + inlined_vector_internal::ConstructElements( + storage_.GetAllocator(), storage_.GetInlinedData(), other_values, other.storage_.GetSize() + ); + storage_.SetInlinedSize(other.storage_.GetSize()); + } + + Storage storage_; + }; + + // ----------------------------------------------------------------------------- + // InlinedVector Non-Member Functions + // ----------------------------------------------------------------------------- + + // `swap(...)` + // + // Swaps the contents of two inlined vectors. + template + void swap(absl::InlinedVector& a, absl::InlinedVector& b) noexcept(noexcept(a.swap(b))) + { + a.swap(b); + } + + // `operator==(...)` + // + // Tests for value-equality of two inlined vectors. + template + bool operator==(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + auto a_data = a.data(); + auto b_data = b.data(); + return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size()); + } + + // `operator!=(...)` + // + // Tests for value-inequality of two inlined vectors. + template + bool operator!=(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return !(a == b); + } + + // `operator<(...)` + // + // Tests whether the value of an inlined vector is less than the value of + // another inlined vector using a lexicographical comparison algorithm. 
+ template + bool operator<(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + auto a_data = a.data(); + auto b_data = b.data(); + return std::lexicographical_compare(a_data, a_data + a.size(), b_data, b_data + b.size()); + } + + // `operator>(...)` + // + // Tests whether the value of an inlined vector is greater than the value of + // another inlined vector using a lexicographical comparison algorithm. + template + bool operator>(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return b < a; + } + + // `operator<=(...)` + // + // Tests whether the value of an inlined vector is less than or equal to the + // value of another inlined vector using a lexicographical comparison algorithm. + template + bool operator<=(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return !(b < a); + } + + // `operator>=(...)` + // + // Tests whether the value of an inlined vector is greater than or equal to the + // value of another inlined vector using a lexicographical comparison algorithm. + template + bool operator>=(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return !(a < b); + } + + // `AbslHashValue(...)` + // + // Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to + // call this directly. + template + H AbslHashValue(H h, const absl::InlinedVector& a) + { + auto size = a.size(); + return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INLINED_VECTOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/btree.h b/CAPI/cpp/grpc/include/absl/container/internal/btree.h new file mode 100644 index 00000000..9bee5f87 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/btree.h @@ -0,0 +1,3675 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A btree implementation of the STL set and map interfaces. A btree is smaller +// and generally also faster than STL set/map (refer to the benchmarks below). +// The red-black tree implementation of STL set/map has an overhead of 3 +// pointers (left, right and parent) plus the node color information for each +// stored value. So a set consumes 40 bytes for each value stored in +// 64-bit mode. This btree implementation stores multiple values on fixed +// size nodes (usually 256 bytes) and doesn't store child pointers for leaf +// nodes. The result is that a btree_set may use much less memory per +// stored value. For the random insertion benchmark in btree_bench.cc, a +// btree_set with node-size of 256 uses 5.1 bytes per stored value. +// +// The packing of multiple values on to each node of a btree has another effect +// besides better space utilization: better cache locality due to fewer cache +// lines being accessed. Better cache locality translates into faster +// operations. +// +// CAVEATS +// +// Insertions and deletions on a btree can cause splitting, merging or +// rebalancing of btree nodes. And even without these operations, insertions +// and deletions on a btree will move values around within a node. In both +// cases, the result is that insertions and deletions can invalidate iterators +// pointing to values other than the one being inserted/deleted. Therefore, this +// container does not provide pointer stability. 
This is notably different from +// STL set/map which takes care to not invalidate iterators on insert/erase +// except, of course, for iterators pointing to the value being erased. A +// partial workaround when erasing is available: erase() returns an iterator +// pointing to the item just after the one that was erased (or end() if none +// exists). + +#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_ +#define ABSL_CONTAINER_INTERNAL_BTREE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" +#include "absl/container/internal/common.h" +#include "absl/container/internal/common_policy_traits.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/layout.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/cord.h" +#include "absl/strings/string_view.h" +#include "absl/types/compare.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + +#ifdef ABSL_BTREE_ENABLE_GENERATIONS +#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_MEMORY_SANITIZER) +// When compiled in sanitizer mode, we add generation integers to the nodes and +// iterators. When iterators are used, we validate that the container has not +// been mutated since the iterator was constructed. +#define ABSL_BTREE_ENABLE_GENERATIONS +#endif + +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + constexpr bool BtreeGenerationsEnabled() + { + return true; + } +#else + constexpr bool BtreeGenerationsEnabled() + { + return false; + } +#endif + + template + using compare_result_t = absl::result_of_t; + + // A helper class that indicates if the Compare parameter is a key-compare-to + // comparator. 
+ template + using btree_is_key_compare_to = + std::is_convertible, absl::weak_ordering>; + + struct StringBtreeDefaultLess + { + using is_transparent = void; + + StringBtreeDefaultLess() = default; + + // Compatibility constructor. + StringBtreeDefaultLess(std::less) + { + } // NOLINT + StringBtreeDefaultLess(std::less) + { + } // NOLINT + + // Allow converting to std::less for use in key_comp()/value_comp(). + explicit operator std::less() const + { + return {}; + } + explicit operator std::less() const + { + return {}; + } + explicit operator std::less() const + { + return {}; + } + + absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); + } + StringBtreeDefaultLess(std::less) + { + } // NOLINT + absl::weak_ordering operator()(const absl::Cord& lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); + } + absl::weak_ordering operator()(const absl::Cord& lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); + } + absl::weak_ordering operator()(absl::string_view lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs)); + } + }; + + struct StringBtreeDefaultGreater + { + using is_transparent = void; + + StringBtreeDefaultGreater() = default; + + StringBtreeDefaultGreater(std::greater) + { + } // NOLINT + StringBtreeDefaultGreater(std::greater) + { + } // NOLINT + + // Allow converting to std::greater for use in key_comp()/value_comp(). 
+ explicit operator std::greater() const + { + return {}; + } + explicit operator std::greater() const + { + return {}; + } + explicit operator std::greater() const + { + return {}; + } + + absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); + } + StringBtreeDefaultGreater(std::greater) + { + } // NOLINT + absl::weak_ordering operator()(const absl::Cord& lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); + } + absl::weak_ordering operator()(const absl::Cord& lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs)); + } + absl::weak_ordering operator()(absl::string_view lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); + } + }; + + // See below comments for checked_compare. + template::value> + struct checked_compare_base : Compare + { + using Compare::Compare; + explicit checked_compare_base(Compare c) : + Compare(std::move(c)) + { + } + const Compare& comp() const + { + return *this; + } + }; + template + struct checked_compare_base + { + explicit checked_compare_base(Compare c) : + compare(std::move(c)) + { + } + const Compare& comp() const + { + return compare; + } + Compare compare; + }; + + // A mechanism for opting out of checked_compare for use only in btree_test.cc. + struct BtreeTestOnlyCheckedCompareOptOutBase + { + }; + + // A helper class to adapt the specified comparator for two use cases: + // (1) When using common Abseil string types with common comparison functors, + // convert a boolean comparison into a three-way comparison that returns an + // `absl::weak_ordering`. This helper class is specialized for + // less, greater, less, + // greater, less, and greater. 
+ // (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever + // a comparison is made, we will make assertions to verify that the comparator + // is valid. + template + struct key_compare_adapter + { + // Inherit from checked_compare_base to support function pointers and also + // keep empty-base-optimization (EBO) support for classes. + // Note: we can't use CompressedTuple here because that would interfere + // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a + // CompressedTuple and nested `CompressedTuple`s don't support EBO. + // TODO(b/214288561): use CompressedTuple instead once it supports EBO for + // nested `CompressedTuple`s. + struct checked_compare : checked_compare_base + { + private: + using Base = typename checked_compare::checked_compare_base; + using Base::comp; + + // If possible, returns whether `t` is equivalent to itself. We can only do + // this for `Key`s because we can't be sure that it's safe to call + // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a + // compilation failure inside the implementation of the comparison operator. + bool is_self_equivalent(const Key& k) const + { + // Note: this works for both boolean and three-way comparators. + return comp()(k, k) == 0; + } + // If we can't compare `t` with itself, returns true unconditionally. + template + bool is_self_equivalent(const T&) const + { + return true; + } + + public: + using Base::Base; + checked_compare(Compare comp) : + Base(std::move(comp)) + { + } // NOLINT + + // Allow converting to Compare for use in key_comp()/value_comp(). 
+ explicit operator Compare() const + { + return comp(); + } + + template>::value, int> = 0> + bool operator()(const T& lhs, const U& rhs) const + { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). + assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const bool lhs_comp_rhs = comp()(lhs, rhs); + assert(!lhs_comp_rhs || !comp()(rhs, lhs)); + return lhs_comp_rhs; + } + + template< + typename T, + typename U, + absl::enable_if_t, absl::weak_ordering>::value, int> = 0> + absl::weak_ordering operator()(const T& lhs, const U& rhs) const + { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). + assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs); +#ifndef NDEBUG + const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs); + if (lhs_comp_rhs > 0) + { + assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0"); + } + else if (lhs_comp_rhs == 0) + { + assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0"); + } + else + { + assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); + } +#endif + return lhs_comp_rhs; + } + }; + using type = absl::conditional_t< + std::is_base_of::value, + Compare, + checked_compare>; + }; + + template<> + struct key_compare_adapter, std::string> + { + using type = StringBtreeDefaultLess; + }; + + template<> + struct key_compare_adapter, std::string> + { + using type = StringBtreeDefaultGreater; + }; + + template<> + struct key_compare_adapter, absl::string_view> + { + using type = StringBtreeDefaultLess; + }; + + template<> + struct key_compare_adapter, absl::string_view> + { + using type = StringBtreeDefaultGreater; + }; + + template<> + struct key_compare_adapter, 
absl::Cord> + { + using type = StringBtreeDefaultLess; + }; + + template<> + struct key_compare_adapter, absl::Cord> + { + using type = StringBtreeDefaultGreater; + }; + + // Detects an 'absl_btree_prefer_linear_node_search' member. This is + // a protocol used as an opt-in or opt-out of linear search. + // + // For example, this would be useful for key types that wrap an integer + // and define their own cheap operator<(). For example: + // + // class K { + // public: + // using absl_btree_prefer_linear_node_search = std::true_type; + // ... + // private: + // friend bool operator<(K a, K b) { return a.k_ < b.k_; } + // int k_; + // }; + // + // btree_map m; // Uses linear search + // + // If T has the preference tag, then it has a preference. + // Btree will use the tag's truth value. + template + struct has_linear_node_search_preference : std::false_type + { + }; + template + struct prefers_linear_node_search : std::false_type + { + }; + template + struct has_linear_node_search_preference< + T, + absl::void_t> : std::true_type + { + }; + template + struct prefers_linear_node_search< + T, + absl::void_t> : T::absl_btree_prefer_linear_node_search + { + }; + + template + constexpr bool compare_has_valid_result_type() + { + using compare_result_type = compare_result_t; + return std::is_same::value || + std::is_convertible::value; + } + + template + class map_value_compare + { + template + friend class btree; + + // Note: this `protected` is part of the API of std::map::value_compare. See + // https://en.cppreference.com/w/cpp/container/map/value_compare. 
+ + protected: + explicit map_value_compare(original_key_compare c) : + comp(std::move(c)) + { + } + + original_key_compare comp; // NOLINT + + public: + auto operator()(const value_type& lhs, const value_type& rhs) const + -> decltype(comp(lhs.first, rhs.first)) + { + return comp(lhs.first, rhs.first); + } + }; + + template + struct common_params : common_policy_traits + { + using original_key_compare = Compare; + + // If Compare is a common comparator for a string-like type, then we adapt it + // to use heterogeneous lookup and to be a key-compare-to comparator. + // We also adapt the comparator to diagnose invalid comparators in debug mode. + // We disable this when `Compare` is invalid in a way that will cause + // adaptation to fail (having invalid return type) so that we can give a + // better compilation failure in static_assert_validation. If we don't do + // this, then there will be cascading compilation failures that are confusing + // for users. + using key_compare = + absl::conditional_t(), Compare, typename key_compare_adapter::type>; + + static constexpr bool kIsKeyCompareStringAdapted = + std::is_same::value || + std::is_same::value; + static constexpr bool kIsKeyCompareTransparent = + IsTransparent::value || kIsKeyCompareStringAdapted; + + // A type which indicates if we have a key-compare-to functor or a plain old + // key-compare functor. 
+ using is_key_compare_to = btree_is_key_compare_to; + + using allocator_type = Alloc; + using key_type = Key; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using slot_policy = SlotPolicy; + using slot_type = typename slot_policy::slot_type; + using value_type = typename slot_policy::value_type; + using init_type = typename slot_policy::mutable_value_type; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + + using value_compare = + absl::conditional_t, original_key_compare>; + using is_map_container = std::integral_constant; + + // For the given lookup key type, returns whether we can have multiple + // equivalent keys in the btree. If this is a multi-container, then we can. + // Otherwise, we can have multiple equivalent keys only if all of the + // following conditions are met: + // - The comparator is transparent. + // - The lookup key type is not the same as key_type. + // - The comparator is not a StringBtreeDefault{Less,Greater} comparator + // that we know has the same equivalence classes for all lookup types. + template + constexpr static bool can_have_multiple_equivalent_keys() + { + return IsMulti || (IsTransparent::value && + !std::is_same::value && + !kIsKeyCompareStringAdapted); + } + + enum + { + kTargetNodeSize = TargetNodeSize, + + // Upper bound for the available space for slots. This is largest for leaf + // nodes, which have overhead of at least a pointer + 4 bytes (for storing + // 3 field_types and an enum). + kNodeSlotSpace = TargetNodeSize - /*minimum overhead=*/(sizeof(void*) + 4), + }; + + // This is an integral type large enough to hold as many slots as will fit a + // node of TargetNodeSize bytes. 
+ using node_count_type = + absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > (std::numeric_limits::max)()), uint16_t, uint8_t>; // NOLINT + }; + + // An adapter class that converts a lower-bound compare into an upper-bound + // compare. Note: there is no need to make a version of this adapter specialized + // for key-compare-to functors because the upper-bound (the first value greater + // than the input) is never an exact match. + template + struct upper_bound_adapter + { + explicit upper_bound_adapter(const Compare& c) : + comp(c) + { + } + template + bool operator()(const K1& a, const K2& b) const + { + // Returns true when a is not greater than b. + return !compare_internal::compare_result_as_less_than(comp(b, a)); + } + + private: + Compare comp; + }; + + enum class MatchKind : uint8_t + { + kEq, + kNe + }; + + template + struct SearchResult + { + V value; + MatchKind match; + + static constexpr bool HasMatch() + { + return true; + } + bool IsEq() const + { + return match == MatchKind::kEq; + } + }; + + // When we don't use CompareTo, `match` is not present. + // This ensures that callers can't use it accidentally when it provides no + // useful information. + template + struct SearchResult + { + SearchResult() + { + } + explicit SearchResult(V v) : + value(v) + { + } + SearchResult(V v, MatchKind /*match*/) : + value(v) + { + } + + V value; + + static constexpr bool HasMatch() + { + return false; + } + static constexpr bool IsEq() + { + return false; + } + }; + + // A node in the btree holding. The same node type is used for both internal + // and leaf nodes in the btree, though the nodes are allocated in such a way + // that the children array is only valid in internal nodes. 
+ template + class btree_node + { + using is_key_compare_to = typename Params::is_key_compare_to; + using field_type = typename Params::node_count_type; + using allocator_type = typename Params::allocator_type; + using slot_type = typename Params::slot_type; + using original_key_compare = typename Params::original_key_compare; + + public: + using params_type = Params; + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using key_compare = typename Params::key_compare; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + + // Btree decides whether to use linear node search as follows: + // - If the comparator expresses a preference, use that. + // - If the key expresses a preference, use that. + // - If the key is arithmetic and the comparator is std::less or + // std::greater, choose linear. + // - Otherwise, choose binary. + // TODO(ezb): Might make sense to add condition(s) based on node-size. + using use_linear_search = std::integral_constant< + bool, + has_linear_node_search_preference::value ? prefers_linear_node_search::value : has_linear_node_search_preference::value ? prefers_linear_node_search::value : + std::is_arithmetic::value && (std::is_same, original_key_compare>::value || std::is_same, original_key_compare>::value)>; + + // This class is organized by absl::container_internal::Layout as if it had + // the following structure: + // // A pointer to the node's parent. + // btree_node *parent; + // + // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a + // // generation integer in order to check that when iterators are + // // used, they haven't been invalidated already. 
Only the generation on + // // the root is used, but we have one on each node because whether a node + // // is root or not can change. + // uint32_t generation; + // + // // The position of the node in the node's parent. + // field_type position; + // // The index of the first populated value in `values`. + // // TODO(ezb): right now, `start` is always 0. Update insertion/merge + // // logic to allow for floating storage within nodes. + // field_type start; + // // The index after the last populated value in `values`. Currently, this + // // is the same as the count of values. + // field_type finish; + // // The maximum number of values the node can hold. This is an integer in + // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf + // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal + // // nodes (even though there are still kNodeSlots values in the node). + // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) + // // to free extra bits for is_root, etc. + // field_type max_count; + // + // // The array of values. The capacity is `max_count` for leaf nodes and + // // kNodeSlots for internal nodes. Only the values in + // // [start, finish) have been initialized and are valid. + // slot_type values[max_count]; + // + // // The array of child pointers. The keys in children[i] are all less + // // than key(i). The keys in children[i + 1] are all greater than key(i). + // // There are 0 children for leaf nodes and kNodeSlots + 1 children for + // // internal nodes. + // btree_node *children[kNodeSlots + 1]; + // + // This class is only constructed by EmptyNodeType. Normally, pointers to the + // layout above are allocated, cast to btree_node*, and de-allocated within + // the btree implementation. + ~btree_node() = default; + btree_node(btree_node const&) = delete; + btree_node& operator=(btree_node const&) = delete; + + // Public for EmptyNodeType. 
+ constexpr static size_type Alignment() + { + static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), "Alignment of all nodes must be equal."); + return InternalLayout().Alignment(); + } + + protected: + btree_node() = default; + + private: + using layout_type = + absl::container_internal::Layout; + constexpr static size_type SizeWithNSlots(size_type n) + { + return layout_type( + /*parent*/ 1, + /*generation*/ BtreeGenerationsEnabled() ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ n, + /*children*/ 0 + ) + .AllocSize(); + } + // A lower bound for the overhead of fields other than slots in a leaf node. + constexpr static size_type MinimumOverhead() + { + return SizeWithNSlots(1) - sizeof(slot_type); + } + + // Compute how many values we can fit onto a leaf node taking into account + // padding. + constexpr static size_type NodeTargetSlots(const size_type begin, const size_type end) + { + return begin == end ? begin : SizeWithNSlots((begin + end) / 2 + 1) > params_type::kTargetNodeSize ? NodeTargetSlots(begin, (begin + end) / 2) : + NodeTargetSlots((begin + end) / 2 + 1, end); + } + + constexpr static size_type kTargetNodeSize = params_type::kTargetNodeSize; + constexpr static size_type kNodeTargetSlots = + NodeTargetSlots(0, kTargetNodeSize); + + // We need a minimum of 3 slots per internal node in order to perform + // splitting (1 value for the two nodes involved in the split and 1 value + // propagated to the parent as the delimiter for the split). For performance + // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy of + // 1/3 (for a node, not a b-tree). + constexpr static size_type kMinNodeSlots = 4; + + constexpr static size_type kNodeSlots = + kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots; + + // The node is internal (i.e. is not a leaf node) if and only if `max_count` + // has this value. 
+ constexpr static field_type kInternalNodeMaxCount = 0; + + constexpr static layout_type Layout(const size_type slot_count, const size_type child_count) + { + return layout_type( + /*parent*/ 1, + /*generation*/ BtreeGenerationsEnabled() ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ slot_count, + /*children*/ child_count + ); + } + // Leaves can have less than kNodeSlots values. + constexpr static layout_type LeafLayout( + const size_type slot_count = kNodeSlots + ) + { + return Layout(slot_count, 0); + } + constexpr static layout_type InternalLayout() + { + return Layout(kNodeSlots, kNodeSlots + 1); + } + constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) + { + return LeafLayout(slot_count).AllocSize(); + } + constexpr static size_type InternalSize() + { + return InternalLayout().AllocSize(); + } + + // N is the index of the type in the Layout definition. + // ElementType is the Nth type in the Layout definition. + template + inline typename layout_type::template ElementType* GetField() + { + // We assert that we don't read from values that aren't there. 
+ assert(N < 4 || is_internal()); + return InternalLayout().template Pointer(reinterpret_cast(this)); + } + template + inline const typename layout_type::template ElementType* GetField() const + { + assert(N < 4 || is_internal()); + return InternalLayout().template Pointer( + reinterpret_cast(this) + ); + } + void set_parent(btree_node* p) + { + *GetField<0>() = p; + } + field_type& mutable_finish() + { + return GetField<2>()[2]; + } + slot_type* slot(size_type i) + { + return &GetField<3>()[i]; + } + slot_type* start_slot() + { + return slot(start()); + } + slot_type* finish_slot() + { + return slot(finish()); + } + const slot_type* slot(size_type i) const + { + return &GetField<3>()[i]; + } + void set_position(field_type v) + { + GetField<2>()[0] = v; + } + void set_start(field_type v) + { + GetField<2>()[1] = v; + } + void set_finish(field_type v) + { + GetField<2>()[2] = v; + } + // This method is only called by the node init methods. + void set_max_count(field_type v) + { + GetField<2>()[3] = v; + } + + public: + // Whether this is a leaf node or not. This value doesn't change after the + // node is created. + bool is_leaf() const + { + return GetField<2>()[3] != kInternalNodeMaxCount; + } + // Whether this is an internal node or not. This value doesn't change after + // the node is created. + bool is_internal() const + { + return !is_leaf(); + } + + // Getter for the position of this node in its parent. + field_type position() const + { + return GetField<2>()[0]; + } + + // Getter for the offset of the first value in the `values` array. + field_type start() const + { + // TODO(ezb): when floating storage is implemented, return GetField<2>()[1]; + assert(GetField<2>()[1] == 0); + return 0; + } + + // Getter for the offset after the last value in the `values` array. + field_type finish() const + { + return GetField<2>()[2]; + } + + // Getters for the number of values stored in this node. 
+ field_type count() const + { + assert(finish() >= start()); + return finish() - start(); + } + field_type max_count() const + { + // Internal nodes have max_count==kInternalNodeMaxCount. + // Leaf nodes have max_count in [1, kNodeSlots]. + const field_type max_count = GetField<2>()[3]; + return max_count == field_type{kInternalNodeMaxCount} ? field_type{kNodeSlots} : max_count; + } + + // Getter for the parent of this node. + btree_node* parent() const + { + return *GetField<0>(); + } + // Getter for whether the node is the root of the tree. The parent of the + // root of the tree is the leftmost node in the tree which is guaranteed to + // be a leaf. + bool is_root() const + { + return parent()->is_leaf(); + } + void make_root() + { + assert(parent()->is_root()); + set_generation(parent()->generation()); + set_parent(parent()->parent()); + } + + // Gets the root node's generation integer, which is the one used by the tree. + uint32_t* get_root_generation() const + { + assert(BtreeGenerationsEnabled()); + const btree_node* curr = this; + for (; !curr->is_root(); curr = curr->parent()) + continue; + return const_cast(&curr->GetField<1>()[0]); + } + + // Returns the generation for iterator validation. + uint32_t generation() const + { + return BtreeGenerationsEnabled() ? *get_root_generation() : 0; + } + // Updates generation. Should only be called on a root node or during node + // initialization. + void set_generation(uint32_t generation) + { + if (BtreeGenerationsEnabled()) + GetField<1>()[0] = generation; + } + // Updates the generation. We do this whenever the node is mutated. + void next_generation() + { + if (BtreeGenerationsEnabled()) + ++*get_root_generation(); + } + + // Getters for the key/value at position i in the node. 
+ const key_type& key(size_type i) const + { + return params_type::key(slot(i)); + } + reference value(size_type i) + { + return params_type::element(slot(i)); + } + const_reference value(size_type i) const + { + return params_type::element(slot(i)); + } + + // Getters/setter for the child at position i in the node. + btree_node* child(field_type i) const + { + return GetField<4>()[i]; + } + btree_node* start_child() const + { + return child(start()); + } + btree_node*& mutable_child(field_type i) + { + return GetField<4>()[i]; + } + void clear_child(field_type i) + { + absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); + } + void set_child_noupdate_position(field_type i, btree_node* c) + { + absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); + mutable_child(i) = c; + } + void set_child(field_type i, btree_node* c) + { + set_child_noupdate_position(i, c); + c->set_position(i); + } + void init_child(field_type i, btree_node* c) + { + set_child(i, c); + c->set_parent(this); + } + + // Returns the position of the first value whose key is not less than k. + template + SearchResult lower_bound( + const K& k, const key_compare& comp + ) const + { + return use_linear_search::value ? linear_search(k, comp) : binary_search(k, comp); + } + // Returns the position of the first value whose key is greater than k. + template + size_type upper_bound(const K& k, const key_compare& comp) const + { + auto upper_compare = upper_bound_adapter(comp); + return use_linear_search::value ? 
linear_search(k, upper_compare).value : binary_search(k, upper_compare).value; + } + + template + SearchResult::value> + linear_search(const K& k, const Compare& comp) const + { + return linear_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); + } + + template + SearchResult::value> + binary_search(const K& k, const Compare& comp) const + { + return binary_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using plain compare. + template + SearchResult linear_search_impl( + const K& k, size_type s, const size_type e, const Compare& comp, std::false_type /* IsCompareTo */ + ) const + { + while (s < e) + { + if (!comp(key(s), k)) + { + break; + } + ++s; + } + return SearchResult{s}; + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using compare-to. + template + SearchResult linear_search_impl( + const K& k, size_type s, const size_type e, const Compare& comp, std::true_type /* IsCompareTo */ + ) const + { + while (s < e) + { + const absl::weak_ordering c = comp(key(s), k); + if (c == 0) + { + return {s, MatchKind::kEq}; + } + else if (c > 0) + { + break; + } + ++s; + } + return {s, MatchKind::kNe}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using plain compare. + template + SearchResult binary_search_impl( + const K& k, size_type s, size_type e, const Compare& comp, std::false_type /* IsCompareTo */ + ) const + { + while (s != e) + { + const size_type mid = (s + e) >> 1; + if (comp(key(mid), k)) + { + s = mid + 1; + } + else + { + e = mid; + } + } + return SearchResult{s}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using compare-to. 
+ template + SearchResult binary_search_impl( + const K& k, size_type s, size_type e, const CompareTo& comp, std::true_type /* IsCompareTo */ + ) const + { + if (params_type::template can_have_multiple_equivalent_keys()) + { + MatchKind exact_match = MatchKind::kNe; + while (s != e) + { + const size_type mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) + { + s = mid + 1; + } + else + { + e = mid; + if (c == 0) + { + // Need to return the first value whose key is not less than k, + // which requires continuing the binary search if there could be + // multiple equivalent keys. + exact_match = MatchKind::kEq; + } + } + } + return {s, exact_match}; + } + else + { // Can't have multiple equivalent keys. + while (s != e) + { + const size_type mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) + { + s = mid + 1; + } + else if (c > 0) + { + e = mid; + } + else + { + return {mid, MatchKind::kEq}; + } + } + return {s, MatchKind::kNe}; + } + } + + // Returns whether key i is ordered correctly with respect to the other keys + // in the node. The motivation here is to detect comparators that violate + // transitivity. Note: we only do comparisons of keys on this node rather than + // the whole tree so that this is constant time. + template + bool is_ordered_correctly(field_type i, const Compare& comp) const + { + if (std::is_base_of::value || + params_type::kIsKeyCompareStringAdapted) + { + return true; + } + + const auto compare = [&](field_type a, field_type b) + { + const absl::weak_ordering cmp = + compare_internal::do_three_way_comparison(comp, key(a), key(b)); + return cmp < 0 ? -1 : cmp > 0 ? 
1 : + 0; + }; + int cmp = -1; + constexpr bool kCanHaveEquivKeys = + params_type::template can_have_multiple_equivalent_keys(); + for (field_type j = start(); j < finish(); ++j) + { + if (j == i) + { + if (cmp > 0) + return false; + continue; + } + int new_cmp = compare(j, i); + if (new_cmp < cmp || (!kCanHaveEquivKeys && new_cmp == 0)) + return false; + cmp = new_cmp; + } + return true; + } + + // Emplaces a value at position i, shifting all existing values and + // children at positions >= i to the right by 1. + template + void emplace_value(field_type i, allocator_type* alloc, Args&&... args); + + // Removes the values at positions [i, i + to_erase), shifting all existing + // values and children after that range to the left by to_erase. Clears all + // children between [i, i + to_erase). + void remove_values(field_type i, field_type to_erase, allocator_type* alloc); + + // Rebalances a node with its right sibling. + void rebalance_right_to_left(field_type to_move, btree_node* right, allocator_type* alloc); + void rebalance_left_to_right(field_type to_move, btree_node* right, allocator_type* alloc); + + // Splits a node, moving a portion of the node's values to its right sibling. + void split(int insert_position, btree_node* dest, allocator_type* alloc); + + // Merges a node with its right sibling, moving all of the values and the + // delimiting key in the parent node onto itself, and deleting the src node. + void merge(btree_node* src, allocator_type* alloc); + + // Node allocation/deletion routines. 
+ void init_leaf(field_type position, field_type max_count, btree_node* parent) + { + set_generation(0); + set_parent(parent); + set_position(position); + set_start(0); + set_finish(0); + set_max_count(max_count); + absl::container_internal::SanitizerPoisonMemoryRegion( + start_slot(), max_count * sizeof(slot_type) + ); + } + void init_internal(field_type position, btree_node* parent) + { + init_leaf(position, kNodeSlots, parent); + // Set `max_count` to a sentinel value to indicate that this node is + // internal. + set_max_count(kInternalNodeMaxCount); + absl::container_internal::SanitizerPoisonMemoryRegion( + &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node*) + ); + } + + static void deallocate(const size_type size, btree_node* node, allocator_type* alloc) + { + absl::container_internal::SanitizerUnpoisonMemoryRegion(node, size); + absl::container_internal::Deallocate(alloc, node, size); + } + + // Deletes a node and all of its children. + static void clear_and_delete(btree_node* node, allocator_type* alloc); + + private: + template + void value_init(const field_type i, allocator_type* alloc, Args&&... 
args) + { + next_generation(); + absl::container_internal::SanitizerUnpoisonObject(slot(i)); + params_type::construct(alloc, slot(i), std::forward(args)...); + } + void value_destroy(const field_type i, allocator_type* alloc) + { + next_generation(); + params_type::destroy(alloc, slot(i)); + absl::container_internal::SanitizerPoisonObject(slot(i)); + } + void value_destroy_n(const field_type i, const field_type n, allocator_type* alloc) + { + next_generation(); + for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) + { + params_type::destroy(alloc, s); + absl::container_internal::SanitizerPoisonObject(s); + } + } + + static void transfer(slot_type* dest, slot_type* src, allocator_type* alloc) + { + absl::container_internal::SanitizerUnpoisonObject(dest); + params_type::transfer(alloc, dest, src); + absl::container_internal::SanitizerPoisonObject(src); + } + + // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`. + void transfer(const size_type dest_i, const size_type src_i, btree_node* src_node, allocator_type* alloc) + { + next_generation(); + transfer(slot(dest_i), src_node->slot(src_i), alloc); + } + + // Transfers `n` values starting at value `src_i` in `src_node` into the + // values starting at value `dest_i` in `this`. + void transfer_n(const size_type n, const size_type dest_i, const size_type src_i, btree_node* src_node, allocator_type* alloc) + { + next_generation(); + for (slot_type *src = src_node->slot(src_i), *end = src + n, *dest = slot(dest_i); + src != end; + ++src, ++dest) + { + transfer(dest, src, alloc); + } + } + + // Same as above, except that we start at the end and work our way to the + // beginning. 
+ void transfer_n_backward(const size_type n, const size_type dest_i, const size_type src_i, btree_node* src_node, allocator_type* alloc) + { + next_generation(); + for (slot_type *src = src_node->slot(src_i + n), *end = src - n, *dest = slot(dest_i + n); + src != end; + --src, --dest) + { + // If we modified the loop index calculations above to avoid the -1s here, + // it would result in UB in the computation of `end` (and possibly `src` + // as well, if n == 0), since slot() is effectively an array index and it + // is UB to compute the address of any out-of-bounds array element except + // for one-past-the-end. + transfer(dest - 1, src - 1, alloc); + } + } + + template + friend class btree; + template + friend class btree_iterator; + friend class BtreeNodePeer; + friend struct btree_access; + }; + + template + bool AreNodesFromSameContainer(const Node* node_a, const Node* node_b) + { + // If either node is null, then give up on checking whether they're from the + // same container. (If exactly one is null, then we'll trigger the + // default-constructed assert in Equals.) + if (node_a == nullptr || node_b == nullptr) + return true; + while (!node_a->is_root()) + node_a = node_a->parent(); + while (!node_b->is_root()) + node_b = node_b->parent(); + return node_a == node_b; + } + + class btree_iterator_generation_info_enabled + { + public: + explicit btree_iterator_generation_info_enabled(uint32_t g) : + generation_(g) + { + } + + // Updates the generation. For use internally right before we return an + // iterator to the user. + template + void update_generation(const Node* node) + { + if (node != nullptr) + generation_ = node->generation(); + } + uint32_t generation() const + { + return generation_; + } + + template + void assert_valid_generation(const Node* node) const + { + if (node != nullptr && node->generation() != generation_) + { + ABSL_INTERNAL_LOG( + FATAL, + "Attempting to use an invalidated iterator. 
The corresponding b-tree " + "container has been mutated since this iterator was constructed." + ); + } + } + + private: + // Used to check that the iterator hasn't been invalidated. + uint32_t generation_; + }; + + class btree_iterator_generation_info_disabled + { + public: + explicit btree_iterator_generation_info_disabled(uint32_t) + { + } + static void update_generation(const void*) + { + } + static uint32_t generation() + { + return 0; + } + static void assert_valid_generation(const void*) + { + } + }; + +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + using btree_iterator_generation_info = btree_iterator_generation_info_enabled; +#else + using btree_iterator_generation_info = btree_iterator_generation_info_disabled; +#endif + + template + class btree_iterator : private btree_iterator_generation_info + { + using field_type = typename Node::field_type; + using key_type = typename Node::key_type; + using size_type = typename Node::size_type; + using params_type = typename Node::params_type; + using is_map_container = typename params_type::is_map_container; + + using node_type = Node; + using normal_node = typename std::remove_const::type; + using const_node = const Node; + using normal_pointer = typename params_type::pointer; + using normal_reference = typename params_type::reference; + using const_pointer = typename params_type::const_pointer; + using const_reference = typename params_type::const_reference; + using slot_type = typename params_type::slot_type; + + using iterator = + btree_iterator; + using const_iterator = + btree_iterator; + + public: + // These aliases are public for std::iterator_traits. 
+ using difference_type = typename Node::difference_type; + using value_type = typename params_type::value_type; + using pointer = Pointer; + using reference = Reference; + using iterator_category = std::bidirectional_iterator_tag; + + btree_iterator() : + btree_iterator(nullptr, -1) + { + } + explicit btree_iterator(Node* n) : + btree_iterator(n, n->start()) + { + } + btree_iterator(Node* n, int p) : + btree_iterator_generation_info(n != nullptr ? n->generation() : ~uint32_t{}), + node_(n), + position_(p) + { + } + + // NOTE: this SFINAE allows for implicit conversions from iterator to + // const_iterator, but it specifically avoids hiding the copy constructor so + // that the trivial one will be used when possible. + template, iterator>::value && std::is_same::value, int> = 0> + btree_iterator(const btree_iterator other) // NOLINT + : + btree_iterator_generation_info(other), + node_(other.node_), + position_(other.position_) + { + } + + bool operator==(const iterator& other) const + { + return Equals(other); + } + bool operator==(const const_iterator& other) const + { + return Equals(other); + } + bool operator!=(const iterator& other) const + { + return !Equals(other); + } + bool operator!=(const const_iterator& other) const + { + return !Equals(other); + } + + // Returns n such that n calls to ++other yields *this. + // Precondition: n exists. + difference_type operator-(const_iterator other) const + { + if (node_ == other.node_) + { + if (node_->is_leaf()) + return position_ - other.position_; + if (position_ == other.position_) + return 0; + } + return distance_slow(other); + } + + // Accessors for the key/value the iterator is pointing at. 
+ reference operator*() const + { + ABSL_HARDENING_ASSERT(node_ != nullptr); + assert_valid_generation(node_); + ABSL_HARDENING_ASSERT(position_ >= node_->start()); + if (position_ >= node_->finish()) + { + ABSL_HARDENING_ASSERT(!IsEndIterator() && "Dereferencing end() iterator"); + ABSL_HARDENING_ASSERT(position_ < node_->finish()); + } + return node_->value(static_cast(position_)); + } + pointer operator->() const + { + return &operator*(); + } + + btree_iterator& operator++() + { + increment(); + return *this; + } + btree_iterator& operator--() + { + decrement(); + return *this; + } + btree_iterator operator++(int) + { + btree_iterator tmp = *this; + ++*this; + return tmp; + } + btree_iterator operator--(int) + { + btree_iterator tmp = *this; + --*this; + return tmp; + } + + private: + friend iterator; + friend const_iterator; + template + friend class btree; + template + friend class btree_container; + template + friend class btree_set_container; + template + friend class btree_map_container; + template + friend class btree_multiset_container; + template + friend class base_checker; + friend struct btree_access; + + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids hiding the copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. 
+ template, const_iterator>::value && std::is_same::value, int> = 0> + explicit btree_iterator(const btree_iterator other) : + btree_iterator_generation_info(other.generation()), + node_(const_cast(other.node_)), + position_(other.position_) + { + } + + bool Equals(const const_iterator other) const + { + ABSL_HARDENING_ASSERT(((node_ == nullptr && other.node_ == nullptr) || (node_ != nullptr && other.node_ != nullptr)) && "Comparing default-constructed iterator with " + "non-default-constructed iterator."); + // Note: we use assert instead of ABSL_HARDENING_ASSERT here because this + // changes the complexity of Equals from O(1) to O(log(N) + log(M)) where + // N/M are sizes of the containers containing node_/other.node_. + assert(AreNodesFromSameContainer(node_, other.node_) && "Comparing iterators from different containers."); + assert_valid_generation(node_); + other.assert_valid_generation(other.node_); + return node_ == other.node_ && position_ == other.position_; + } + + bool IsEndIterator() const + { + if (position_ != node_->finish()) + return false; + node_type* node = node_; + while (!node->is_root()) + { + if (node->position() != node->parent()->finish()) + return false; + node = node->parent(); + } + return true; + } + + // Returns n such that n calls to ++other yields *this. + // Precondition: n exists && (this->node_ != other.node_ || + // !this->node_->is_leaf() || this->position_ != other.position_). + difference_type distance_slow(const_iterator other) const; + + // Increment/decrement the iterator. 
+ void increment() + { + assert_valid_generation(node_); + if (node_->is_leaf() && ++position_ < node_->finish()) + { + return; + } + increment_slow(); + } + void increment_slow(); + + void decrement() + { + assert_valid_generation(node_); + if (node_->is_leaf() && --position_ >= node_->start()) + { + return; + } + decrement_slow(); + } + void decrement_slow(); + + const key_type& key() const + { + return node_->key(static_cast(position_)); + } + decltype(std::declval()->slot(0)) slot() + { + return node_->slot(static_cast(position_)); + } + + void update_generation() + { + btree_iterator_generation_info::update_generation(node_); + } + + // The node in the tree the iterator is pointing at. + Node* node_; + // The position within the node of the tree the iterator is pointing at. + // NOTE: this is an int rather than a field_type because iterators can point + // to invalid positions (such as -1) in certain circumstances. + int position_; + }; + + template + class btree + { + using node_type = btree_node; + using is_key_compare_to = typename Params::is_key_compare_to; + using field_type = typename node_type::field_type; + + // We use a static empty node for the root/leftmost/rightmost of empty btrees + // in order to avoid branching in begin()/end(). + struct alignas(node_type::Alignment()) EmptyNodeType : node_type + { + using field_type = typename node_type::field_type; + node_type* parent; +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + uint32_t generation = 0; +#endif + field_type position = 0; + field_type start = 0; + field_type finish = 0; + // max_count must be != kInternalNodeMaxCount (so that this node is regarded + // as a leaf node). max_count() is never called when the tree is empty. + field_type max_count = node_type::kInternalNodeMaxCount + 1; + +#ifdef _MSC_VER + // MSVC has constexpr code generations bugs here. 
+ EmptyNodeType() : + parent(this) + { + } +#else + explicit constexpr EmptyNodeType(node_type* p) : + parent(p) + { + } +#endif + }; + + static node_type* EmptyNode() + { +#ifdef _MSC_VER + static EmptyNodeType* empty_node = new EmptyNodeType; + // This assert fails on some other construction methods. + assert(empty_node->parent == empty_node); + return empty_node; +#else + static constexpr EmptyNodeType empty_node( + const_cast(&empty_node) + ); + return const_cast(&empty_node); +#endif + } + + enum : uint32_t + { + kNodeSlots = node_type::kNodeSlots, + kMinNodeValues = kNodeSlots / 2, + }; + + struct node_stats + { + using size_type = typename Params::size_type; + + node_stats(size_type l, size_type i) : + leaf_nodes(l), + internal_nodes(i) + { + } + + node_stats& operator+=(const node_stats& other) + { + leaf_nodes += other.leaf_nodes; + internal_nodes += other.internal_nodes; + return *this; + } + + size_type leaf_nodes; + size_type internal_nodes; + }; + + public: + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + using key_compare = typename Params::key_compare; + using original_key_compare = typename Params::original_key_compare; + using value_compare = typename Params::value_compare; + using allocator_type = typename Params::allocator_type; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using iterator = + typename btree_iterator::iterator; + using const_iterator = typename iterator::const_iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using node_handle_type = node_handle; + + // Internal types made public for use by btree_container types. 
+ using params_type = Params; + using slot_type = typename Params::slot_type; + + private: + // Copies or moves (depending on the template parameter) the values in + // other into this btree in their order in other. This btree must be empty + // before this method is called. This method is used in copy construction, + // copy assignment, and move assignment. + template + void copy_or_move_values_in_order(Btree& other); + + // Validates that various assumptions/requirements are true at compile time. + constexpr static bool static_assert_validation(); + + public: + btree(const key_compare& comp, const allocator_type& alloc) : + root_(EmptyNode()), + rightmost_(comp, alloc, EmptyNode()), + size_(0) + { + } + + btree(const btree& other) : + btree(other, other.allocator()) + { + } + btree(const btree& other, const allocator_type& alloc) : + btree(other.key_comp(), alloc) + { + copy_or_move_values_in_order(other); + } + btree(btree&& other) noexcept + : + root_(absl::exchange(other.root_, EmptyNode())), + rightmost_(std::move(other.rightmost_)), + size_(absl::exchange(other.size_, 0u)) + { + other.mutable_rightmost() = EmptyNode(); + } + btree(btree&& other, const allocator_type& alloc) : + btree(other.key_comp(), alloc) + { + if (alloc == other.allocator()) + { + swap(other); + } + else + { + // Move values from `other` one at a time when allocators are different. + copy_or_move_values_in_order(other); + } + } + + ~btree() + { + // Put static_asserts in destructor to avoid triggering them before the type + // is complete. + static_assert(static_assert_validation(), "This call must be elided."); + clear(); + } + + // Assign the contents of other to *this. 
+ btree& operator=(const btree& other); + btree& operator=(btree&& other) noexcept; + + iterator begin() + { + return iterator(leftmost()); + } + const_iterator begin() const + { + return const_iterator(leftmost()); + } + iterator end() + { + return iterator(rightmost(), rightmost()->finish()); + } + const_iterator end() const + { + return const_iterator(rightmost(), rightmost()->finish()); + } + reverse_iterator rbegin() + { + return reverse_iterator(end()); + } + const_reverse_iterator rbegin() const + { + return const_reverse_iterator(end()); + } + reverse_iterator rend() + { + return reverse_iterator(begin()); + } + const_reverse_iterator rend() const + { + return const_reverse_iterator(begin()); + } + + // Finds the first element whose key is not less than `key`. + template + iterator lower_bound(const K& key) + { + return internal_end(internal_lower_bound(key).value); + } + template + const_iterator lower_bound(const K& key) const + { + return internal_end(internal_lower_bound(key).value); + } + + // Finds the first element whose key is not less than `key` and also returns + // whether that element is equal to `key`. + template + std::pair lower_bound_equal(const K& key) const; + + // Finds the first element whose key is greater than `key`. + template + iterator upper_bound(const K& key) + { + return internal_end(internal_upper_bound(key)); + } + template + const_iterator upper_bound(const K& key) const + { + return internal_end(internal_upper_bound(key)); + } + + // Finds the range of values which compare equal to key. The first member of + // the returned pair is equal to lower_bound(key). The second member of the + // pair is equal to upper_bound(key). + template + std::pair equal_range(const K& key); + template + std::pair equal_range(const K& key) const + { + return const_cast(this)->equal_range(key); + } + + // Inserts a value into the btree only if it does not already exist. The + // boolean return value indicates whether insertion succeeded or failed. 
+ // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_unique(const K& key, Args&&... args); + + // Inserts with hint. Checks to see if the value should be placed immediately + // before `position` in the tree. If so, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_unique() were made. + // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_hint_unique(iterator position, const K& key, Args&&... args); + + // Insert a range of values into the btree. + // Note: the first overload avoids constructing a value_type if the key + // already exists in the btree. + template()(params_type::key(*std::declval()), std::declval()))> + void insert_iterator_unique(InputIterator b, InputIterator e, int); + // We need the second overload for cases in which we need to construct a + // value_type in order to compare it with the keys already in the btree. + template + void insert_iterator_unique(InputIterator b, InputIterator e, char); + + // Inserts a value into the btree. + template + iterator insert_multi(const key_type& key, ValueType&& v); + + // Inserts a value into the btree. + template + iterator insert_multi(ValueType&& v) + { + return insert_multi(params_type::key(v), std::forward(v)); + } + + // Insert with hint. Check to see if the value should be placed immediately + // before position in the tree. If it does, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_multi(v) were made. + template + iterator insert_hint_multi(iterator position, ValueType&& v); + + // Insert a range of values into the btree. 
+ template + void insert_iterator_multi(InputIterator b, InputIterator e); + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + // Requirement: does not read the value at `*iter`. + iterator erase(iterator iter); + + // Erases range. Returns the number of keys erased and an iterator pointing + // to the element after the last erased element. + std::pair erase_range(iterator begin, iterator end); + + // Finds an element with key equivalent to `key` or returns `end()` if `key` + // is not present. + template + iterator find(const K& key) + { + return internal_end(internal_find(key)); + } + template + const_iterator find(const K& key) const + { + return internal_end(internal_find(key)); + } + + // Clear the btree, deleting all of the values it contains. + void clear(); + + // Swaps the contents of `this` and `other`. + void swap(btree& other); + + const key_compare& key_comp() const noexcept + { + return rightmost_.template get<0>(); + } + template + bool compare_keys(const K1& a, const K2& b) const + { + return compare_internal::compare_result_as_less_than(key_comp()(a, b)); + } + + value_compare value_comp() const + { + return value_compare(original_key_compare(key_comp())); + } + + // Verifies the structure of the btree. + void verify() const; + + // Size routines. + size_type size() const + { + return size_; + } + size_type max_size() const + { + return (std::numeric_limits::max)(); + } + bool empty() const + { + return size_ == 0; + } + + // The height of the btree. An empty tree will have height 0. + size_type height() const + { + size_type h = 0; + if (!empty()) + { + // Count the length of the chain from the leftmost node up to the + // root. We actually count from the root back around to the level below + // the root, but the calculation is the same because of the circularity + // of that traversal. 
+ const node_type* n = root(); + do + { + ++h; + n = n->parent(); + } while (n != root()); + } + return h; + } + + // The number of internal, leaf and total nodes used by the btree. + size_type leaf_nodes() const + { + return internal_stats(root()).leaf_nodes; + } + size_type internal_nodes() const + { + return internal_stats(root()).internal_nodes; + } + size_type nodes() const + { + node_stats stats = internal_stats(root()); + return stats.leaf_nodes + stats.internal_nodes; + } + + // The total number of bytes used by the btree. + // TODO(b/169338300): update to support node_btree_*. + size_type bytes_used() const + { + node_stats stats = internal_stats(root()); + if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) + { + return sizeof(*this) + node_type::LeafSize(root()->max_count()); + } + else + { + return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() + + stats.internal_nodes * node_type::InternalSize(); + } + } + + // The average number of bytes used per value stored in the btree assuming + // random insertion order. + static double average_bytes_per_value() + { + // The expected number of values per node with random insertion order is the + // average of the maximum and minimum numbers of values per node. + const double expected_values_per_node = (kNodeSlots + kMinNodeValues) / 2.0; + return node_type::LeafSize() / expected_values_per_node; + } + + // The fullness of the btree. Computed as the number of elements in the btree + // divided by the maximum number of elements a tree with the current number + // of nodes could hold. A value of 1 indicates perfect space + // utilization. Smaller values indicate space wastage. + // Returns 0 for empty trees. + double fullness() const + { + if (empty()) + return 0.0; + return static_cast(size()) / (nodes() * kNodeSlots); + } + // The overhead of the btree structure in bytes per node. 
Computed as the + // total number of bytes used by the btree minus the number of bytes used for + // storing elements divided by the number of elements. + // Returns 0 for empty trees. + double overhead() const + { + if (empty()) + return 0.0; + return (bytes_used() - size() * sizeof(value_type)) / + static_cast(size()); + } + + // The allocator used by the btree. + allocator_type get_allocator() const + { + return allocator(); + } + + private: + friend struct btree_access; + + // Internal accessor routines. + node_type* root() + { + return root_; + } + const node_type* root() const + { + return root_; + } + node_type*& mutable_root() noexcept + { + return root_; + } + node_type* rightmost() + { + return rightmost_.template get<2>(); + } + const node_type* rightmost() const + { + return rightmost_.template get<2>(); + } + node_type*& mutable_rightmost() noexcept + { + return rightmost_.template get<2>(); + } + key_compare* mutable_key_comp() noexcept + { + return &rightmost_.template get<0>(); + } + + // The leftmost node is stored as the parent of the root node. + node_type* leftmost() + { + return root()->parent(); + } + const node_type* leftmost() const + { + return root()->parent(); + } + + // Allocator routines. + allocator_type* mutable_allocator() noexcept + { + return &rightmost_.template get<1>(); + } + const allocator_type& allocator() const noexcept + { + return rightmost_.template get<1>(); + } + + // Allocates a correctly aligned node of at least size bytes using the + // allocator. + node_type* allocate(size_type size) + { + return reinterpret_cast( + absl::container_internal::Allocate( + mutable_allocator(), size + ) + ); + } + + // Node creation/deletion routines. 
+ node_type* new_internal_node(field_type position, node_type* parent) + { + node_type* n = allocate(node_type::InternalSize()); + n->init_internal(position, parent); + return n; + } + node_type* new_leaf_node(field_type position, node_type* parent) + { + node_type* n = allocate(node_type::LeafSize()); + n->init_leaf(position, kNodeSlots, parent); + return n; + } + node_type* new_leaf_root_node(field_type max_count) + { + node_type* n = allocate(node_type::LeafSize(max_count)); + n->init_leaf(/*position=*/0, max_count, /*parent=*/n); + return n; + } + + // Deletion helper routines. + iterator rebalance_after_delete(iterator iter); + + // Rebalances or splits the node iter points to. + void rebalance_or_split(iterator* iter); + + // Merges the values of left, right and the delimiting key on their parent + // onto left, removing the delimiting key and deleting right. + void merge_nodes(node_type* left, node_type* right); + + // Tries to merge node with its left or right sibling, and failing that, + // rebalance with its left or right sibling. Returns true if a merge + // occurred, at which point it is no longer valid to access node. Returns + // false if no merging took place. + bool try_merge_or_rebalance(iterator* iter); + + // Tries to shrink the height of the tree by 1. + void try_shrink(); + + iterator internal_end(iterator iter) + { + return iter.node_ != nullptr ? iter : end(); + } + const_iterator internal_end(const_iterator iter) const + { + return iter.node_ != nullptr ? iter : end(); + } + + // Emplaces a value into the btree immediately before iter. Requires that + // key(v) <= iter.key() and (--iter).key() <= key(v). + template + iterator internal_emplace(iterator iter, Args&&... args); + + // Returns an iterator pointing to the first value >= the value "iter" is + // pointing at. Note that "iter" might be pointing to an invalid location such + // as iter.position_ == iter.node_->finish(). 
This routine simply moves iter + // up in the tree to a valid location. Requires: iter.node_ is non-null. + template + static IterType internal_last(IterType iter); + + // Returns an iterator pointing to the leaf position at which key would + // reside in the tree, unless there is an exact match - in which case, the + // result may not be on a leaf. When there's a three-way comparator, we can + // return whether there was an exact match. This allows the caller to avoid a + // subsequent comparison to determine if an exact match was made, which is + // important for keys with expensive comparison, such as strings. + template + SearchResult internal_locate( + const K& key + ) const; + + // Internal routine which implements lower_bound(). + template + SearchResult internal_lower_bound( + const K& key + ) const; + + // Internal routine which implements upper_bound(). + template + iterator internal_upper_bound(const K& key) const; + + // Internal routine which implements find(). + template + iterator internal_find(const K& key) const; + + // Verifies the tree structure of node. + size_type internal_verify(const node_type* node, const key_type* lo, const key_type* hi) const; + + node_stats internal_stats(const node_type* node) const + { + // The root can be a static empty node. + if (node == nullptr || (node == root() && empty())) + { + return node_stats(0, 0); + } + if (node->is_leaf()) + { + return node_stats(1, 0); + } + node_stats res(0, 1); + for (int i = node->start(); i <= node->finish(); ++i) + { + res += internal_stats(node->child(i)); + } + return res; + } + + node_type* root_; + + // A pointer to the rightmost node. Note that the leftmost node is stored as + // the root's parent. We use compressed tuple in order to save space because + // key_compare and allocator_type are usually empty. + absl::container_internal::CompressedTuple + rightmost_; + + // Number of values. 
+ size_type size_; + }; + + //// + // btree_node methods + template + template + inline void btree_node

::emplace_value(const field_type i, allocator_type* alloc, Args&&... args) + { + assert(i >= start()); + assert(i <= finish()); + // Shift old values to create space for new value and then construct it in + // place. + if (i < finish()) + { + transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this, alloc); + } + value_init(static_cast(i), alloc, std::forward(args)...); + set_finish(finish() + 1); + + if (is_internal() && finish() > i + 1) + { + for (field_type j = finish(); j > i + 1; --j) + { + set_child(j, child(j - 1)); + } + clear_child(i + 1); + } + } + + template + inline void btree_node

::remove_values(const field_type i, const field_type to_erase, allocator_type* alloc) + { + // Transfer values after the removed range into their new places. + value_destroy_n(i, to_erase, alloc); + const field_type orig_finish = finish(); + const field_type src_i = i + to_erase; + transfer_n(orig_finish - src_i, i, src_i, this, alloc); + + if (is_internal()) + { + // Delete all children between begin and end. + for (field_type j = 0; j < to_erase; ++j) + { + clear_and_delete(child(i + j + 1), alloc); + } + // Rotate children after end into new positions. + for (field_type j = i + to_erase + 1; j <= orig_finish; ++j) + { + set_child(j - to_erase, child(j)); + clear_child(j); + } + } + set_finish(orig_finish - to_erase); + } + + template + void btree_node

::rebalance_right_to_left(field_type to_move, btree_node* right, allocator_type* alloc) + { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(right->count() >= count()); + assert(to_move >= 1); + assert(to_move <= right->count()); + + // 1) Move the delimiting value in the parent to the left node. + transfer(finish(), position(), parent(), alloc); + + // 2) Move the (to_move - 1) values from the right node to the left node. + transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc); + + // 3) Move the new delimiting value to the parent from the right node. + parent()->transfer(position(), right->start() + to_move - 1, right, alloc); + + // 4) Shift the values in the right node to their correct positions. + right->transfer_n(right->count() - to_move, right->start(), right->start() + to_move, right, alloc); + + if (is_internal()) + { + // Move the child pointers from the right to the left node. + for (field_type i = 0; i < to_move; ++i) + { + init_child(finish() + i + 1, right->child(i)); + } + for (field_type i = right->start(); i <= right->finish() - to_move; ++i) + { + assert(i + to_move <= right->max_count()); + right->init_child(i, right->child(i + to_move)); + right->clear_child(i + to_move); + } + } + + // Fixup `finish` on the left and right nodes. + set_finish(finish() + to_move); + right->set_finish(right->finish() - to_move); + } + + template + void btree_node

::rebalance_left_to_right(field_type to_move, btree_node* right, allocator_type* alloc) + { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(count() >= right->count()); + assert(to_move >= 1); + assert(to_move <= count()); + + // Values in the right node are shifted to the right to make room for the + // new to_move values. Then, the delimiting value in the parent and the + // other (to_move - 1) values in the left node are moved into the right node. + // Lastly, a new delimiting value is moved from the left node into the + // parent, and the remaining empty left node entries are destroyed. + + // 1) Shift existing values in the right node to their correct positions. + right->transfer_n_backward(right->count(), right->start() + to_move, right->start(), right, alloc); + + // 2) Move the delimiting value in the parent to the right node. + right->transfer(right->start() + to_move - 1, position(), parent(), alloc); + + // 3) Move the (to_move - 1) values from the left node to the right node. + right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this, alloc); + + // 4) Move the new delimiting value to the parent from the left node. + parent()->transfer(position(), finish() - to_move, this, alloc); + + if (is_internal()) + { + // Move the child pointers from the left to the right node. + for (field_type i = right->finish() + 1; i > right->start(); --i) + { + right->init_child(i - 1 + to_move, right->child(i - 1)); + right->clear_child(i - 1); + } + for (field_type i = 1; i <= to_move; ++i) + { + right->init_child(i - 1, child(finish() - to_move + i)); + clear_child(finish() - to_move + i); + } + } + + // Fixup the counts on the left and right nodes. + set_finish(finish() - to_move); + right->set_finish(right->finish() + to_move); + } + + template + void btree_node

::split(const int insert_position, btree_node* dest, allocator_type* alloc) + { + assert(dest->count() == 0); + assert(max_count() == kNodeSlots); + assert(position() + 1 == dest->position()); + assert(parent() == dest->parent()); + + // We bias the split based on the position being inserted. If we're + // inserting at the beginning of the left node then bias the split to put + // more values on the right node. If we're inserting at the end of the + // right node then bias the split to put more values on the left node. + if (insert_position == start()) + { + dest->set_finish(dest->start() + finish() - 1); + } + else if (insert_position == kNodeSlots) + { + dest->set_finish(dest->start()); + } + else + { + dest->set_finish(dest->start() + count() / 2); + } + set_finish(finish() - dest->count()); + assert(count() >= 1); + + // Move values from the left sibling to the right sibling. + dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc); + + // The split key is the largest value in the left sibling. + --mutable_finish(); + parent()->emplace_value(position(), alloc, finish_slot()); + value_destroy(finish(), alloc); + parent()->set_child_noupdate_position(position() + 1, dest); + + if (is_internal()) + { + for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish(); + ++i, ++j) + { + assert(child(j) != nullptr); + dest->init_child(i, child(j)); + clear_child(j); + } + } + } + + template + void btree_node

::merge(btree_node* src, allocator_type* alloc) + { + assert(parent() == src->parent()); + assert(position() + 1 == src->position()); + + // Move the delimiting value to the left node. + value_init(finish(), alloc, parent()->slot(position())); + + // Move the values from the right to the left node. + transfer_n(src->count(), finish() + 1, src->start(), src, alloc); + + if (is_internal()) + { + // Move the child pointers from the right to the left node. + for (field_type i = src->start(), j = finish() + 1; i <= src->finish(); + ++i, ++j) + { + init_child(j, src->child(i)); + src->clear_child(i); + } + } + + // Fixup `finish` on the src and dest nodes. + set_finish(start() + 1 + count() + src->count()); + src->set_finish(src->start()); + + // Remove the value on the parent node and delete the src node. + parent()->remove_values(position(), /*to_erase=*/1, alloc); + } + + template + void btree_node

::clear_and_delete(btree_node* node, allocator_type* alloc) + { + if (node->is_leaf()) + { + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(LeafSize(node->max_count()), node, alloc); + return; + } + if (node->count() == 0) + { + deallocate(InternalSize(), node, alloc); + return; + } + + // The parent of the root of the subtree we are deleting. + btree_node* delete_root_parent = node->parent(); + + // Navigate to the leftmost leaf under node, and then delete upwards. + while (node->is_internal()) + node = node->start_child(); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // When generations are enabled, we delete the leftmost leaf last in case it's + // the parent of the root and we need to check whether it's a leaf before we + // can update the root's generation. + // TODO(ezb): if we change btree_node::is_root to check a bool inside the node + // instead of checking whether the parent is a leaf, we can remove this logic. + btree_node* leftmost_leaf = node; +#endif + // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`, + // which isn't guaranteed to be a valid `field_type`. + size_type pos = node->position(); + btree_node* parent = node->parent(); + for (;;) + { + // In each iteration of the next loop, we delete one leaf node and go right. + assert(pos <= parent->finish()); + do + { + node = parent->child(static_cast(pos)); + if (node->is_internal()) + { + // Navigate to the leftmost leaf under node. + while (node->is_internal()) + node = node->start_child(); + pos = node->position(); + parent = node->parent(); + } + node->value_destroy_n(node->start(), node->count(), alloc); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (leftmost_leaf != node) +#endif + deallocate(LeafSize(node->max_count()), node, alloc); + ++pos; + } while (pos <= parent->finish()); + + // Once we've deleted all children of parent, delete parent and go up/right. 
+ assert(pos > parent->finish()); + do + { + node = parent; + pos = node->position(); + parent = node->parent(); + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(InternalSize(), node, alloc); + if (parent == delete_root_parent) + { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); +#endif + return; + } + ++pos; + } while (pos > parent->finish()); + } + } + + //// + // btree_iterator methods + + // Note: the implementation here is based on btree_node::clear_and_delete. + template + auto btree_iterator::distance_slow(const_iterator other) const + -> difference_type + { + const_iterator begin = other; + const_iterator end = *this; + assert(begin.node_ != end.node_ || !begin.node_->is_leaf() || begin.position_ != end.position_); + + const node_type* node = begin.node_; + // We need to compensate for double counting if begin.node_ is a leaf node. + difference_type count = node->is_leaf() ? -begin.position_ : 0; + + // First navigate to the leftmost leaf node past begin. + if (node->is_internal()) + { + ++count; + node = node->child(begin.position_ + 1); + } + while (node->is_internal()) + node = node->start_child(); + + // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`, + // which isn't guaranteed to be a valid `field_type`. + size_type pos = node->position(); + const node_type* parent = node->parent(); + for (;;) + { + // In each iteration of the next loop, we count one leaf node and go right. + assert(pos <= parent->finish()); + do + { + node = parent->child(static_cast(pos)); + if (node->is_internal()) + { + // Navigate to the leftmost leaf under node. 
+ while (node->is_internal()) + node = node->start_child(); + pos = node->position(); + parent = node->parent(); + } + if (node == end.node_) + return count + end.position_; + if (parent == end.node_ && pos == static_cast(end.position_)) + return count + node->count(); + // +1 is for the next internal node value. + count += node->count() + 1; + ++pos; + } while (pos <= parent->finish()); + + // Once we've counted all children of parent, go up/right. + assert(pos > parent->finish()); + do + { + node = parent; + pos = node->position(); + parent = node->parent(); + // -1 because we counted the value at end and shouldn't. + if (parent == end.node_ && pos == static_cast(end.position_)) + return count - 1; + ++pos; + } while (pos > parent->finish()); + } + } + + template + void btree_iterator::increment_slow() + { + if (node_->is_leaf()) + { + assert(position_ >= node_->finish()); + btree_iterator save(*this); + while (position_ == node_->finish() && !node_->is_root()) + { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position(); + node_ = node_->parent(); + } + // TODO(ezb): assert we aren't incrementing end() instead of handling. + if (position_ == node_->finish()) + { + *this = save; + } + } + else + { + assert(position_ < node_->finish()); + node_ = node_->child(static_cast(position_ + 1)); + while (node_->is_internal()) + { + node_ = node_->start_child(); + } + position_ = node_->start(); + } + } + + template + void btree_iterator::decrement_slow() + { + if (node_->is_leaf()) + { + assert(position_ <= -1); + btree_iterator save(*this); + while (position_ < node_->start() && !node_->is_root()) + { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position() - 1; + node_ = node_->parent(); + } + // TODO(ezb): assert we aren't decrementing begin() instead of handling. 
+ if (position_ < node_->start()) + { + *this = save; + } + } + else + { + assert(position_ >= node_->start()); + node_ = node_->child(static_cast(position_)); + while (node_->is_internal()) + { + node_ = node_->child(node_->finish()); + } + position_ = node_->finish() - 1; + } + } + + //// + // btree methods + template + template + void btree

::copy_or_move_values_in_order(Btree& other) + { + static_assert(std::is_same::value || std::is_same::value, "Btree type must be same or const."); + assert(empty()); + + // We can avoid key comparisons because we know the order of the + // values is the same order we'll store them in. + auto iter = other.begin(); + if (iter == other.end()) + return; + insert_multi(iter.slot()); + ++iter; + for (; iter != other.end(); ++iter) + { + // If the btree is not empty, we can just insert the new value at the end + // of the tree. + internal_emplace(end(), iter.slot()); + } + } + + template + constexpr bool btree

::static_assert_validation() + { + static_assert(std::is_nothrow_copy_constructible::value, "Key comparison must be nothrow copy constructible"); + static_assert(std::is_nothrow_copy_constructible::value, "Allocator must be nothrow copy constructible"); + static_assert(std::is_trivially_copyable::value, "iterator not trivially copyable."); + + // Note: We assert that kTargetValues, which is computed from + // Params::kTargetNodeSize, must fit the node_type::field_type. + static_assert( + kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), + "target node size too large" + ); + + // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. + static_assert( + compare_has_valid_result_type(), + "key comparison function must return absl::{weak,strong}_ordering or " + "bool." + ); + + // Test the assumption made in setting kNodeSlotSpace. + static_assert(node_type::MinimumOverhead() >= sizeof(void*) + 4, "node space assumption incorrect"); + + return true; + } + + template + template + auto btree

::lower_bound_equal(const K& key) const + -> std::pair + { + const SearchResult res = + internal_lower_bound(key); + const iterator lower = iterator(internal_end(res.value)); + const bool equal = res.HasMatch() ? res.IsEq() : lower != end() && !compare_keys(key, lower.key()); + return {lower, equal}; + } + + template + template + auto btree

::equal_range(const K& key) -> std::pair + { + const std::pair lower_and_equal = lower_bound_equal(key); + const iterator lower = lower_and_equal.first; + if (!lower_and_equal.second) + { + return {lower, lower}; + } + + const iterator next = std::next(lower); + if (!params_type::template can_have_multiple_equivalent_keys()) + { + // The next iterator after lower must point to a key greater than `key`. + // Note: if this assert fails, then it may indicate that the comparator does + // not meet the equivalence requirements for Compare + // (see https://en.cppreference.com/w/cpp/named_req/Compare). + assert(next == end() || compare_keys(key, next.key())); + return {lower, next}; + } + // Try once more to avoid the call to upper_bound() if there's only one + // equivalent key. This should prevent all calls to upper_bound() in cases of + // unique-containers with heterogeneous comparators in which all comparison + // operators have the same equivalence classes. + if (next == end() || compare_keys(key, next.key())) + return {lower, next}; + + // In this case, we need to call upper_bound() to avoid worst case O(N) + // behavior if we were to iterate over equal keys. + return {lower, upper_bound(key)}; + } + + template + template + auto btree

::insert_unique(const K& key, Args&&... args) + -> std::pair + { + if (empty()) + { + mutable_root() = mutable_rightmost() = new_leaf_root_node(1); + } + + SearchResult res = internal_locate(key); + iterator iter = res.value; + + if (res.HasMatch()) + { + if (res.IsEq()) + { + // The key already exists in the tree, do nothing. + return {iter, false}; + } + } + else + { + iterator last = internal_last(iter); + if (last.node_ && !compare_keys(key, last.key())) + { + // The key already exists in the tree, do nothing. + return {last, false}; + } + } + return {internal_emplace(iter, std::forward(args)...), true}; + } + + template + template + inline auto btree

::insert_hint_unique(iterator position, const K& key, Args&&... args) + -> std::pair + { + if (!empty()) + { + if (position == end() || compare_keys(key, position.key())) + { + if (position == begin() || compare_keys(std::prev(position).key(), key)) + { + // prev.key() < key < position.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } + else if (compare_keys(position.key(), key)) + { + ++position; + if (position == end() || compare_keys(key, position.key())) + { + // {original `position`}.key() < key < {current `position`}.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } + else + { + // position.key() == key + return {position, false}; + } + } + return insert_unique(key, std::forward(args)...); + } + + template + template + void btree

::insert_iterator_unique(InputIterator b, InputIterator e, int) + { + for (; b != e; ++b) + { + insert_hint_unique(end(), params_type::key(*b), *b); + } + } + + template + template + void btree

::insert_iterator_unique(InputIterator b, InputIterator e, char) + { + for (; b != e; ++b) + { + // Use a node handle to manage a temp slot. + auto node_handle = + CommonAccess::Construct(get_allocator(), *b); + slot_type* slot = CommonAccess::GetSlot(node_handle); + insert_hint_unique(end(), params_type::key(slot), slot); + } + } + + template + template + auto btree

::insert_multi(const key_type& key, ValueType&& v) -> iterator + { + if (empty()) + { + mutable_root() = mutable_rightmost() = new_leaf_root_node(1); + } + + iterator iter = internal_upper_bound(key); + if (iter.node_ == nullptr) + { + iter = end(); + } + return internal_emplace(iter, std::forward(v)); + } + + template + template + auto btree

::insert_hint_multi(iterator position, ValueType&& v) -> iterator + { + if (!empty()) + { + const key_type& key = params_type::key(v); + if (position == end() || !compare_keys(position.key(), key)) + { + if (position == begin() || + !compare_keys(key, std::prev(position).key())) + { + // prev.key() <= key <= position.key() + return internal_emplace(position, std::forward(v)); + } + } + else + { + ++position; + if (position == end() || !compare_keys(position.key(), key)) + { + // {original `position`}.key() < key < {current `position`}.key() + return internal_emplace(position, std::forward(v)); + } + } + } + return insert_multi(std::forward(v)); + } + + template + template + void btree

::insert_iterator_multi(InputIterator b, InputIterator e) + { + for (; b != e; ++b) + { + insert_hint_multi(end(), *b); + } + } + + template + auto btree

::operator=(const btree& other) -> btree& + { + if (this != &other) + { + clear(); + + *mutable_key_comp() = other.key_comp(); + if (absl::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) + { + *mutable_allocator() = other.allocator(); + } + + copy_or_move_values_in_order(other); + } + return *this; + } + + template + auto btree

::operator=(btree&& other) noexcept -> btree& + { + if (this != &other) + { + clear(); + + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) + { + swap(root_, other.root_); + // Note: `rightmost_` also contains the allocator and the key comparator. + swap(rightmost_, other.rightmost_); + swap(size_, other.size_); + } + else + { + if (allocator() == other.allocator()) + { + swap(mutable_root(), other.mutable_root()); + swap(*mutable_key_comp(), *other.mutable_key_comp()); + swap(mutable_rightmost(), other.mutable_rightmost()); + swap(size_, other.size_); + } + else + { + // We aren't allowed to propagate the allocator and the allocator is + // different so we can't take over its memory. We must move each element + // individually. We need both `other` and `this` to have `other`s key + // comparator while moving the values so we can't swap the key + // comparators. + *mutable_key_comp() = other.key_comp(); + copy_or_move_values_in_order(other); + } + } + } + return *this; + } + + template + auto btree

::erase(iterator iter) -> iterator + { + iter.node_->value_destroy(static_cast(iter.position_), mutable_allocator()); + iter.update_generation(); + + const bool internal_delete = iter.node_->is_internal(); + if (internal_delete) + { + // Deletion of a value on an internal node. First, transfer the largest + // value from our left child here, then erase/rebalance from that position. + // We can get to the largest value from our left child by decrementing iter. + iterator internal_iter(iter); + --iter; + assert(iter.node_->is_leaf()); + internal_iter.node_->transfer( + static_cast(internal_iter.position_), + static_cast(iter.position_), + iter.node_, + mutable_allocator() + ); + } + else + { + // Shift values after erased position in leaf. In the internal case, we + // don't need to do this because the leaf position is the end of the node. + const field_type transfer_from = + static_cast(iter.position_ + 1); + const field_type num_to_transfer = iter.node_->finish() - transfer_from; + iter.node_->transfer_n(num_to_transfer, static_cast(iter.position_), transfer_from, iter.node_, mutable_allocator()); + } + // Update node finish and container size. + iter.node_->set_finish(iter.node_->finish() - 1); + --size_; + + // We want to return the next value after the one we just erased. If we + // erased from an internal node (internal_delete == true), then the next + // value is ++(++iter). If we erased from a leaf node (internal_delete == + // false) then the next value is ++iter. Note that ++iter may point to an + // internal node and the value in the internal node may move to a leaf node + // (iter.node_) when rebalancing is performed at the leaf level. + + iterator res = rebalance_after_delete(iter); + + // If we erased from an internal node, advance the iterator. + if (internal_delete) + { + ++res; + } + return res; + } + + template + auto btree

::rebalance_after_delete(iterator iter) -> iterator + { + // Merge/rebalance as we walk back up the tree. + iterator res(iter); + bool first_iteration = true; + for (;;) + { + if (iter.node_ == root()) + { + try_shrink(); + if (empty()) + { + return end(); + } + break; + } + if (iter.node_->count() >= kMinNodeValues) + { + break; + } + bool merged = try_merge_or_rebalance(&iter); + // On the first iteration, we should update `res` with `iter` because `res` + // may have been invalidated. + if (first_iteration) + { + res = iter; + first_iteration = false; + } + if (!merged) + { + break; + } + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + } + res.update_generation(); + + // Adjust our return value. If we're pointing at the end of a node, advance + // the iterator. + if (res.position_ == res.node_->finish()) + { + res.position_ = res.node_->finish() - 1; + ++res; + } + + return res; + } + + template + auto btree

::erase_range(iterator begin, iterator end) + -> std::pair + { + size_type count = static_cast(end - begin); + assert(count >= 0); + + if (count == 0) + { + return {0, begin}; + } + + if (static_cast(count) == size_) + { + clear(); + return {count, this->end()}; + } + + if (begin.node_ == end.node_) + { + assert(end.position_ > begin.position_); + begin.node_->remove_values( + static_cast(begin.position_), + static_cast(end.position_ - begin.position_), + mutable_allocator() + ); + size_ -= count; + return {count, rebalance_after_delete(begin)}; + } + + const size_type target_size = size_ - count; + while (size_ > target_size) + { + if (begin.node_->is_leaf()) + { + const size_type remaining_to_erase = size_ - target_size; + const size_type remaining_in_node = + static_cast(begin.node_->finish() - begin.position_); + const field_type to_erase = static_cast( + (std::min)(remaining_to_erase, remaining_in_node) + ); + begin.node_->remove_values(static_cast(begin.position_), to_erase, mutable_allocator()); + size_ -= to_erase; + begin = rebalance_after_delete(begin); + } + else + { + begin = erase(begin); + } + } + begin.update_generation(); + return {count, begin}; + } + + template + void btree

::clear() + { + if (!empty()) + { + node_type::clear_and_delete(root(), mutable_allocator()); + } + mutable_root() = mutable_rightmost() = EmptyNode(); + size_ = 0; + } + + template + void btree

::swap(btree& other) + { + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_swap::value) + { + // Note: `rightmost_` also contains the allocator and the key comparator. + swap(rightmost_, other.rightmost_); + } + else + { + // It's undefined behavior if the allocators are unequal here. + assert(allocator() == other.allocator()); + swap(mutable_rightmost(), other.mutable_rightmost()); + swap(*mutable_key_comp(), *other.mutable_key_comp()); + } + swap(mutable_root(), other.mutable_root()); + swap(size_, other.size_); + } + + template + void btree

::verify() const + { + assert(root() != nullptr); + assert(leftmost() != nullptr); + assert(rightmost() != nullptr); + assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); + assert(leftmost() == (++const_iterator(root(), -1)).node_); + assert(rightmost() == (--const_iterator(root(), root()->finish())).node_); + assert(leftmost()->is_leaf()); + assert(rightmost()->is_leaf()); + } + + template + void btree

::rebalance_or_split(iterator* iter) + { + node_type*& node = iter->node_; + int& insert_position = iter->position_; + assert(node->count() == node->max_count()); + assert(kNodeSlots == node->max_count()); + + // First try to make room on the node by rebalancing. + node_type* parent = node->parent(); + if (node != root()) + { + if (node->position() > parent->start()) + { + // Try rebalancing with our left sibling. + node_type* left = parent->child(node->position() - 1); + assert(left->max_count() == kNodeSlots); + if (left->count() < kNodeSlots) + { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the end of the right node then we bias rebalancing to + // fill up the left node. + field_type to_move = + (kNodeSlots - left->count()) / + (1 + (static_cast(insert_position) < kNodeSlots)); + to_move = (std::max)(field_type{1}, to_move); + + if (static_cast(insert_position) - to_move >= + node->start() || + left->count() + to_move < kNodeSlots) + { + left->rebalance_right_to_left(to_move, node, mutable_allocator()); + + assert(node->max_count() - node->count() == to_move); + insert_position = static_cast( + static_cast(insert_position) - to_move + ); + if (insert_position < node->start()) + { + insert_position = insert_position + left->count() + 1; + node = left; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + if (node->position() < parent->finish()) + { + // Try rebalancing with our right sibling. + node_type* right = parent->child(node->position() + 1); + assert(right->max_count() == kNodeSlots); + if (right->count() < kNodeSlots) + { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the beginning of the left node then we bias rebalancing + // to fill up the right node. 
+ field_type to_move = (kNodeSlots - right->count()) / + (1 + (insert_position > node->start())); + to_move = (std::max)(field_type{1}, to_move); + + if (static_cast(insert_position) <= + node->finish() - to_move || + right->count() + to_move < kNodeSlots) + { + node->rebalance_left_to_right(to_move, right, mutable_allocator()); + + if (insert_position > node->finish()) + { + insert_position = insert_position - node->count() - 1; + node = right; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + // Rebalancing failed, make sure there is room on the parent node for a new + // value. + assert(parent->max_count() == kNodeSlots); + if (parent->count() == kNodeSlots) + { + iterator parent_iter(parent, node->position()); + rebalance_or_split(&parent_iter); + parent = node->parent(); + } + } + else + { + // Rebalancing not possible because this is the root node. + // Create a new root node and set the current root node as the child of the + // new root. + parent = new_internal_node(/*position=*/0, parent); + parent->set_generation(root()->generation()); + parent->init_child(parent->start(), node); + mutable_root() = parent; + // If the former root was a leaf node, then it's now the rightmost node. + assert(parent->start_child()->is_internal() || parent->start_child() == rightmost()); + } + + // Split the node. + node_type* split_node; + if (node->is_leaf()) + { + split_node = new_leaf_node(node->position() + 1, parent); + node->split(insert_position, split_node, mutable_allocator()); + if (rightmost() == node) + mutable_rightmost() = split_node; + } + else + { + split_node = new_internal_node(node->position() + 1, parent); + node->split(insert_position, split_node, mutable_allocator()); + } + + if (insert_position > node->finish()) + { + insert_position = insert_position - node->count() - 1; + node = split_node; + } + } + + template + void btree

::merge_nodes(node_type* left, node_type* right) + { + left->merge(right, mutable_allocator()); + if (rightmost() == right) + mutable_rightmost() = left; + } + + template + bool btree

::try_merge_or_rebalance(iterator* iter) + { + node_type* parent = iter->node_->parent(); + if (iter->node_->position() > parent->start()) + { + // Try merging with our left sibling. + node_type* left = parent->child(iter->node_->position() - 1); + assert(left->max_count() == kNodeSlots); + if (1U + left->count() + iter->node_->count() <= kNodeSlots) + { + iter->position_ += 1 + left->count(); + merge_nodes(left, iter->node_); + iter->node_ = left; + return true; + } + } + if (iter->node_->position() < parent->finish()) + { + // Try merging with our right sibling. + node_type* right = parent->child(iter->node_->position() + 1); + assert(right->max_count() == kNodeSlots); + if (1U + iter->node_->count() + right->count() <= kNodeSlots) + { + merge_nodes(iter->node_, right); + return true; + } + // Try rebalancing with our right sibling. We don't perform rebalancing if + // we deleted the first element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the front of the tree. + if (right->count() > kMinNodeValues && + (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) + { + field_type to_move = (right->count() - iter->node_->count()) / 2; + to_move = + (std::min)(to_move, static_cast(right->count() - 1)); + iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); + return false; + } + } + if (iter->node_->position() > parent->start()) + { + // Try rebalancing with our left sibling. We don't perform rebalancing if + // we deleted the last element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the back of the tree. 
+ node_type* left = parent->child(iter->node_->position() - 1); + if (left->count() > kMinNodeValues && + (iter->node_->count() == 0 || + iter->position_ < iter->node_->finish())) + { + field_type to_move = (left->count() - iter->node_->count()) / 2; + to_move = (std::min)(to_move, static_cast(left->count() - 1)); + left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); + iter->position_ += to_move; + return false; + } + } + return false; + } + + template + void btree

::try_shrink() + { + node_type* orig_root = root(); + if (orig_root->count() > 0) + { + return; + } + // Deleted the last item on the root node, shrink the height of the tree. + if (orig_root->is_leaf()) + { + assert(size() == 0); + mutable_root() = mutable_rightmost() = EmptyNode(); + } + else + { + node_type* child = orig_root->start_child(); + child->make_root(); + mutable_root() = child; + } + node_type::clear_and_delete(orig_root, mutable_allocator()); + } + + template + template + inline IterType btree

::internal_last(IterType iter) + { + assert(iter.node_ != nullptr); + while (iter.position_ == iter.node_->finish()) + { + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + if (iter.node_->is_leaf()) + { + iter.node_ = nullptr; + break; + } + } + iter.update_generation(); + return iter; + } + + template + template + inline auto btree

::internal_emplace(iterator iter, Args&&... args) + -> iterator + { + if (iter.node_->is_internal()) + { + // We can't insert on an internal node. Instead, we'll insert after the + // previous value which is guaranteed to be on a leaf node. + --iter; + ++iter.position_; + } + const field_type max_count = iter.node_->max_count(); + allocator_type* alloc = mutable_allocator(); + + const auto transfer_and_delete = [&](node_type* old_node, + node_type* new_node) + { + new_node->transfer_n(old_node->count(), new_node->start(), old_node->start(), old_node, alloc); + new_node->set_finish(old_node->finish()); + old_node->set_finish(old_node->start()); + new_node->set_generation(old_node->generation()); + node_type::clear_and_delete(old_node, alloc); + }; + const auto replace_leaf_root_node = [&](field_type new_node_size) + { + assert(iter.node_ == root()); + node_type* old_root = iter.node_; + node_type* new_root = iter.node_ = new_leaf_root_node(new_node_size); + transfer_and_delete(old_root, new_root); + mutable_root() = mutable_rightmost() = new_root; + }; + + bool replaced_node = false; + if (iter.node_->count() == max_count) + { + // Make room in the leaf for the new item. + if (max_count < kNodeSlots) + { + // Insertion into the root where the root is smaller than the full node + // size. Simply grow the size of the root node. 
+ replace_leaf_root_node(static_cast( + (std::min)(static_cast(kNodeSlots), 2 * max_count) + )); + replaced_node = true; + } + else + { + rebalance_or_split(&iter); + } + } + (void)replaced_node; +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + if (!replaced_node) + { + assert(iter.node_->is_leaf()); + if (iter.node_->is_root()) + { + replace_leaf_root_node(max_count); + } + else + { + node_type* old_node = iter.node_; + const bool was_rightmost = rightmost() == old_node; + const bool was_leftmost = leftmost() == old_node; + node_type* parent = old_node->parent(); + const field_type position = old_node->position(); + node_type* new_node = iter.node_ = new_leaf_node(position, parent); + parent->set_child_noupdate_position(position, new_node); + transfer_and_delete(old_node, new_node); + if (was_rightmost) + mutable_rightmost() = new_node; + // The leftmost node is stored as the parent of the root node. + if (was_leftmost) + root()->set_parent(new_node); + } + } +#endif + iter.node_->emplace_value(static_cast(iter.position_), alloc, std::forward(args)...); + assert( + iter.node_->is_ordered_correctly(static_cast(iter.position_), original_key_compare(key_comp())) && + "If this assert fails, then either (1) the comparator may violate " + "transitivity, i.e. comp(a,b) && comp(b,c) -> comp(a,c) (see " + "https://en.cppreference.com/w/cpp/named_req/Compare), or (2) a " + "key may have been mutated after it was inserted into the tree." + ); + ++size_; + iter.update_generation(); + return iter; + } + + template + template + inline auto btree

::internal_locate(const K& key) const + -> SearchResult + { + iterator iter(const_cast(root())); + for (;;) + { + SearchResult res = + iter.node_->lower_bound(key, key_comp()); + iter.position_ = static_cast(res.value); + if (res.IsEq()) + { + return {iter, MatchKind::kEq}; + } + // Note: in the non-key-compare-to case, we don't need to walk all the way + // down the tree if the keys are equal, but determining equality would + // require doing an extra comparison on each node on the way down, and we + // will need to go all the way to the leaf node in the expected case. + if (iter.node_->is_leaf()) + { + break; + } + iter.node_ = iter.node_->child(static_cast(iter.position_)); + } + // Note: in the non-key-compare-to case, the key may actually be equivalent + // here (and the MatchKind::kNe is ignored). + return {iter, MatchKind::kNe}; + } + + template + template + auto btree

::internal_lower_bound(const K& key) const + -> SearchResult + { + if (!params_type::template can_have_multiple_equivalent_keys()) + { + SearchResult ret = internal_locate(key); + ret.value = internal_last(ret.value); + return ret; + } + iterator iter(const_cast(root())); + SearchResult res; + bool seen_eq = false; + for (;;) + { + res = iter.node_->lower_bound(key, key_comp()); + iter.position_ = static_cast(res.value); + if (iter.node_->is_leaf()) + { + break; + } + seen_eq = seen_eq || res.IsEq(); + iter.node_ = iter.node_->child(static_cast(iter.position_)); + } + if (res.IsEq()) + return {iter, MatchKind::kEq}; + return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; + } + + template + template + auto btree

::internal_upper_bound(const K& key) const -> iterator + { + iterator iter(const_cast(root())); + for (;;) + { + iter.position_ = static_cast(iter.node_->upper_bound(key, key_comp())); + if (iter.node_->is_leaf()) + { + break; + } + iter.node_ = iter.node_->child(static_cast(iter.position_)); + } + return internal_last(iter); + } + + template + template + auto btree

::internal_find(const K& key) const -> iterator + { + SearchResult res = internal_locate(key); + if (res.HasMatch()) + { + if (res.IsEq()) + { + return res.value; + } + } + else + { + const iterator iter = internal_last(res.value); + if (iter.node_ != nullptr && !compare_keys(key, iter.key())) + { + return iter; + } + } + return {nullptr, 0}; + } + + template + typename btree

::size_type btree

::internal_verify( + const node_type* node, const key_type* lo, const key_type* hi + ) const + { + assert(node->count() > 0); + assert(node->count() <= node->max_count()); + if (lo) + { + assert(!compare_keys(node->key(node->start()), *lo)); + } + if (hi) + { + assert(!compare_keys(*hi, node->key(node->finish() - 1))); + } + for (int i = node->start() + 1; i < node->finish(); ++i) + { + assert(!compare_keys(node->key(i), node->key(i - 1))); + } + size_type count = node->count(); + if (node->is_internal()) + { + for (field_type i = node->start(); i <= node->finish(); ++i) + { + assert(node->child(i) != nullptr); + assert(node->child(i)->parent() == node); + assert(node->child(i)->position() == i); + count += internal_verify(node->child(i), i == node->start() ? lo : &node->key(i - 1), i == node->finish() ? hi : &node->key(i)); + } + } + return count; + } + + struct btree_access + { + template + static auto erase_if(BtreeContainer& container, Pred pred) -> + typename BtreeContainer::size_type + { + const auto initial_size = container.size(); + auto& tree = container.tree_; + auto* alloc = tree.mutable_allocator(); + for (auto it = container.begin(); it != container.end();) + { + if (!pred(*it)) + { + ++it; + continue; + } + auto* node = it.node_; + if (node->is_internal()) + { + // Handle internal nodes normally. + it = container.erase(it); + continue; + } + // If this is a leaf node, then we do all the erases from this node + // at once before doing rebalancing. + + // The current position to transfer slots to. 
+ int to_pos = it.position_; + node->value_destroy(it.position_, alloc); + while (++it.position_ < node->finish()) + { + it.update_generation(); + if (pred(*it)) + { + node->value_destroy(it.position_, alloc); + } + else + { + node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); + } + } + const int num_deleted = node->finish() - to_pos; + tree.size_ -= num_deleted; + node->set_finish(to_pos); + it.position_ = to_pos; + it = tree.rebalance_after_delete(it); + } + return initial_size - container.size(); + } + }; + +#undef ABSL_BTREE_ENABLE_GENERATIONS + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h b/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h new file mode 100644 index 00000000..56f39982 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h @@ -0,0 +1,891 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ +#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/common.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // A common base class for btree_set, btree_map, btree_multiset, and + // btree_multimap. + template + class btree_container + { + using params_type = typename Tree::params_type; + + protected: + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = + typename KeyArg::template type< + K, + typename Tree::key_type>; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using difference_type = typename Tree::difference_type; + using key_compare = typename Tree::original_key_compare; + using value_compare = typename Tree::value_compare; + using allocator_type = typename Tree::allocator_type; + using reference = typename Tree::reference; + using const_reference = typename Tree::const_reference; + using pointer = typename Tree::pointer; + using const_pointer = typename Tree::const_pointer; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using reverse_iterator = typename Tree::reverse_iterator; + using const_reverse_iterator = typename Tree::const_reverse_iterator; + using node_type = typename Tree::node_handle_type; + + struct extract_and_get_next_return_type + { + node_type node; + iterator next; + }; + + // Constructors/assignments. 
+ btree_container() : + tree_(key_compare(), allocator_type()) + { + } + explicit btree_container(const key_compare& comp, const allocator_type& alloc = allocator_type()) : + tree_(comp, alloc) + { + } + explicit btree_container(const allocator_type& alloc) : + tree_(key_compare(), alloc) + { + } + + btree_container(const btree_container& other) : + btree_container(other, absl::allocator_traits::select_on_container_copy_construction(other.get_allocator())) + { + } + btree_container(const btree_container& other, const allocator_type& alloc) : + tree_(other.tree_, alloc) + { + } + + btree_container(btree_container&& other) noexcept( + std::is_nothrow_move_constructible::value + ) = default; + btree_container(btree_container&& other, const allocator_type& alloc) : + tree_(std::move(other.tree_), alloc) + { + } + + btree_container& operator=(const btree_container& other) = default; + btree_container& operator=(btree_container&& other) noexcept( + std::is_nothrow_move_assignable::value + ) = default; + + // Iterator routines. 
+ iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.begin(); + } + const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.begin(); + } + const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.begin(); + } + iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.end(); + } + const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.end(); + } + const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.end(); + } + reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.rbegin(); + } + const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.rbegin(); + } + const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.rbegin(); + } + reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.rend(); + } + const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.rend(); + } + const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.rend(); + } + + // Lookup routines. 
+ template + size_type count(const key_arg& key) const + { + auto equal_range = this->equal_range(key); + return equal_range.second - equal_range.first; + } + template + iterator find(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.find(key); + } + template + const_iterator find(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.find(key); + } + template + bool contains(const key_arg& key) const + { + return find(key) != end(); + } + template + iterator lower_bound(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.lower_bound(key); + } + template + const_iterator lower_bound(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.lower_bound(key); + } + template + iterator upper_bound(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.upper_bound(key); + } + template + const_iterator upper_bound(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.upper_bound(key); + } + template + std::pair equal_range(const key_arg& key) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.equal_range(key); + } + template + std::pair equal_range( + const key_arg& key + ) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.equal_range(key); + } + + // Deletion routines. Note that there is also a deletion routine that is + // specific to btree_set_container/btree_multiset_container. + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). 
+ iterator erase(const_iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.erase(iterator(iter)); + } + iterator erase(iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.erase(iter); + } + iterator erase(const_iterator first, const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return tree_.erase_range(iterator(first), iterator(last)).second; + } + template + size_type erase(const key_arg& key) + { + auto equal_range = this->equal_range(key); + return tree_.erase_range(equal_range.first, equal_range.second).first; + } + + // Extract routines. + extract_and_get_next_return_type extract_and_get_next(const_iterator position) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. + // Note: we rely on erase() taking place after Construct(). + return {CommonAccess::Construct(get_allocator(), iterator(position).slot()), erase(position)}; + } + node_type extract(iterator position) + { + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. + auto node = + CommonAccess::Construct(get_allocator(), position.slot()); + erase(position); + return node; + } + node_type extract(const_iterator position) + { + return extract(iterator(position)); + } + + // Utility routines. + ABSL_ATTRIBUTE_REINITIALIZES void clear() + { + tree_.clear(); + } + void swap(btree_container& other) + { + tree_.swap(other.tree_); + } + void verify() const + { + tree_.verify(); + } + + // Size routines. 
+ size_type size() const + { + return tree_.size(); + } + size_type max_size() const + { + return tree_.max_size(); + } + bool empty() const + { + return tree_.empty(); + } + + friend bool operator==(const btree_container& x, const btree_container& y) + { + if (x.size() != y.size()) + return false; + return std::equal(x.begin(), x.end(), y.begin()); + } + + friend bool operator!=(const btree_container& x, const btree_container& y) + { + return !(x == y); + } + + friend bool operator<(const btree_container& x, const btree_container& y) + { + return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); + } + + friend bool operator>(const btree_container& x, const btree_container& y) + { + return y < x; + } + + friend bool operator<=(const btree_container& x, const btree_container& y) + { + return !(y < x); + } + + friend bool operator>=(const btree_container& x, const btree_container& y) + { + return !(x < y); + } + + // The allocator used by the btree. + allocator_type get_allocator() const + { + return tree_.get_allocator(); + } + + // The key comparator used by the btree. + key_compare key_comp() const + { + return key_compare(tree_.key_comp()); + } + value_compare value_comp() const + { + return tree_.value_comp(); + } + + // Support absl::Hash. + template + friend State AbslHashValue(State h, const btree_container& b) + { + for (const auto& v : b) + { + h = State::combine(std::move(h), v); + } + return State::combine(std::move(h), b.size()); + } + + protected: + friend struct btree_access; + Tree tree_; + }; + + // A common base class for btree_set and btree_map. 
+ template + class btree_set_container : public btree_container + { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + using insert_return_type = InsertReturnType; + + // Inherit constructors. + using super_type::super_type; + btree_set_container() + { + } + + // Range constructors. + template + btree_set_container(InputIterator b, InputIterator e, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + super_type(comp, alloc) + { + insert(b, e); + } + template + btree_set_container(InputIterator b, InputIterator e, const allocator_type& alloc) : + btree_set_container(b, e, key_compare(), alloc) + { + } + + // Initializer list constructors. + btree_set_container(std::initializer_list init, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + btree_set_container(init.begin(), init.end(), comp, alloc) + { + } + btree_set_container(std::initializer_list init, const allocator_type& alloc) : + btree_set_container(init.begin(), init.end(), alloc) + { + } + + // Insertion routines. 
+ std::pair insert(const value_type& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_.insert_unique(params_type::key(v), v); + } + std::pair insert(value_type&& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_.insert_unique(params_type::key(v), std::move(v)); + } + template + std::pair emplace(Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + auto* slot = CommonAccess::GetSlot(node); + return this->tree_.insert_unique(params_type::key(slot), slot); + } + iterator insert(const_iterator hint, const value_type& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), v) + .first; + } + iterator insert(const_iterator hint, value_type&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) + .first; + } + template + iterator emplace_hint(const_iterator hint, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + // Use a node handle to manage a temp slot. 
+ auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + auto* slot = CommonAccess::GetSlot(node); + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(slot), slot) + .first; + } + template + void insert(InputIterator b, InputIterator e) + { + this->tree_.insert_iterator_unique(b, e, 0); + } + void insert(std::initializer_list init) + { + this->tree_.insert_iterator_unique(init.begin(), init.end(), 0); + } + insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (!node) + return {this->end(), false, node_type()}; + std::pair res = + this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node)); + if (res.second) + { + CommonAccess::Destroy(&node); + return {res.first, true, node_type()}; + } + else + { + return {res.first, false, std::move(node)}; + } + } + iterator insert(const_iterator hint, node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (!node) + return this->end(); + std::pair res = this->tree_.insert_hint_unique( + iterator(hint), params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node) + ); + if (res.second) + CommonAccess::Destroy(&node); + return res.first; + } + + // Node extraction routines. + template + node_type extract(const key_arg& key) + { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves elements from `src` into `this`. If the element already exists in + // `this`, it is left unmodified in `src`. 
+ template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container& src) + { // NOLINT + for (auto src_it = src.begin(); src_it != src.end();) + { + if (insert(std::move(params_type::element(src_it.slot()))).second) + { + src_it = src.erase(src_it); + } + else + { + ++src_it; + } + } + } + + template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container&& src) + { + merge(src); + } + }; + + // Base class for btree_map. + template + class btree_map_container : public btree_set_container + { + using super_type = btree_set_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + private: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using mapped_type = typename params_type::mapped_type; + using value_type = typename Tree::value_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + + // Inherit constructors. + using super_type::super_type; + btree_map_container() + { + } + + // Insertion routines. + // Note: the nullptr template arguments and extra `const M&` overloads allow + // for supporting bitfield arguments. 
+ template + std::pair insert_or_assign(const key_arg& k, const M& obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(k, obj); + } + template + std::pair insert_or_assign(key_arg&& k, const M& obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(std::forward(k), obj); + } + template + std::pair insert_or_assign(const key_arg& k, M&& obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(k, std::forward(obj)); + } + template + std::pair insert_or_assign(key_arg&& k, M&& obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(std::forward(k), std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg& k, const M& obj) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_hint_impl(hint, k, obj); + } + template + iterator insert_or_assign(const_iterator hint, key_arg&& k, const M& obj) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_hint_impl(hint, std::forward(k), obj); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg& k, M&& obj) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_hint_impl(hint, k, std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, key_arg&& k, M&& obj) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_hint_impl(hint, std::forward(k), std::forward(obj)); + } + + template::value, int> = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace_impl(k, std::forward(args)...); + } + template::value, int> = 0> + std::pair try_emplace(key_arg&& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + template + iterator try_emplace(const_iterator hint, const key_arg& k, Args&&... 
args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace_hint_impl(hint, k, std::forward(args)...); + } + template + iterator try_emplace(const_iterator hint, key_arg&& k, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace_hint_impl(hint, std::forward(k), std::forward(args)...); + } + + template + mapped_type& operator[](const key_arg& k) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace(k).first->second; + } + template + mapped_type& operator[](key_arg&& k) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace(std::forward(k)).first->second; + } + + template + mapped_type& at(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + template + const mapped_type& at(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + + private: + // Note: when we call `std::forward(obj)` twice, it's safe because + // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when + // `ret.second` is false. + template + std::pair insert_or_assign_impl(K&& k, M&& obj) + { + const std::pair ret = + this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); + if (!ret.second) + ret.first->second = std::forward(obj); + return ret; + } + template + iterator insert_or_assign_hint_impl(const_iterator hint, K&& k, M&& obj) + { + const std::pair ret = this->tree_.insert_hint_unique( + iterator(hint), k, std::forward(k), std::forward(obj) + ); + if (!ret.second) + ret.first->second = std::forward(obj); + return ret.first; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) + { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...) 
+ ); + } + template + iterator try_emplace_hint_impl(const_iterator hint, K&& k, Args&&... args) + { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)) + .first; + } + }; + + // A common base class for btree_multiset and btree_multimap. + template + class btree_multiset_container : public btree_container + { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + + // Inherit constructors. + using super_type::super_type; + btree_multiset_container() + { + } + + // Range constructors. + template + btree_multiset_container(InputIterator b, InputIterator e, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + super_type(comp, alloc) + { + insert(b, e); + } + template + btree_multiset_container(InputIterator b, InputIterator e, const allocator_type& alloc) : + btree_multiset_container(b, e, key_compare(), alloc) + { + } + + // Initializer list constructors. 
+ btree_multiset_container(std::initializer_list init, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + btree_multiset_container(init.begin(), init.end(), comp, alloc) + { + } + btree_multiset_container(std::initializer_list init, const allocator_type& alloc) : + btree_multiset_container(init.begin(), init.end(), alloc) + { + } + + // Insertion routines. + iterator insert(const value_type& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_.insert_multi(v); + } + iterator insert(value_type&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_.insert_multi(std::move(v)); + } + iterator insert(const_iterator hint, const value_type& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_.insert_hint_multi(iterator(hint), v); + } + iterator insert(const_iterator hint, value_type&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return this->tree_.insert_hint_multi(iterator(hint), std::move(v)); + } + template + void insert(InputIterator b, InputIterator e) + { + this->tree_.insert_iterator_multi(b, e); + } + void insert(std::initializer_list init) + { + this->tree_.insert_iterator_multi(init.begin(), init.end()); + } + template + iterator emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + return this->tree_.insert_multi(CommonAccess::GetSlot(node)); + } + template + iterator emplace_hint(const_iterator hint, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + // Use a node handle to manage a temp slot. 
+ auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + return this->tree_.insert_hint_multi(iterator(hint), CommonAccess::GetSlot(node)); + } + iterator insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (!node) + return this->end(); + iterator res = + this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node)); + CommonAccess::Destroy(&node); + return res; + } + iterator insert(const_iterator hint, node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (!node) + return this->end(); + iterator res = this->tree_.insert_hint_multi( + iterator(hint), + std::move(params_type::element(CommonAccess::GetSlot(node))) + ); + CommonAccess::Destroy(&node); + return res; + } + + // Node extraction routines. + template + node_type extract(const key_arg& key) + { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves all elements from `src` into `this`. + template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container& src) + { // NOLINT + for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) + { + insert(std::move(params_type::element(src_it.slot()))); + } + src.clear(); + } + + template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container&& src) + { + merge(src); + } + }; + + // A base class for btree_multimap. 
+ template + class btree_multimap_container : public btree_multiset_container + { + using super_type = btree_multiset_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + public: + using mapped_type = typename params_type::mapped_type; + + // Inherit constructors. + using super_type::super_type; + btree_multimap_container() + { + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/common.h b/CAPI/cpp/grpc/include/absl/container/internal/common.h new file mode 100644 index 00000000..f2c44b29 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/common.h @@ -0,0 +1,258 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_COMMON_H_ +#define ABSL_CONTAINER_INTERNAL_COMMON_H_ + +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/types/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + struct IsTransparent : std::false_type + { + }; + template + struct IsTransparent> : std::true_type + { + }; + + template + struct KeyArg + { + // Transparent. Forward `K`. + template + using type = K; + }; + + template<> + struct KeyArg + { + // Not transparent. Always use `key_type`. 
+ template + using type = key_type; + }; + + // The node_handle concept from C++17. + // We specialize node_handle for sets and maps. node_handle_base holds the + // common API of both. + template + class node_handle_base + { + protected: + using slot_type = typename PolicyTraits::slot_type; + + public: + using allocator_type = Alloc; + + constexpr node_handle_base() = default; + node_handle_base(node_handle_base&& other) noexcept + { + *this = std::move(other); + } + ~node_handle_base() + { + destroy(); + } + node_handle_base& operator=(node_handle_base&& other) noexcept + { + destroy(); + if (!other.empty()) + { + alloc_ = other.alloc_; + PolicyTraits::transfer(alloc(), slot(), other.slot()); + other.reset(); + } + return *this; + } + + bool empty() const noexcept + { + return !alloc_; + } + explicit operator bool() const noexcept + { + return !empty(); + } + allocator_type get_allocator() const + { + return *alloc_; + } + + protected: + friend struct CommonAccess; + + struct transfer_tag_t + { + }; + node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) : + alloc_(a) + { + PolicyTraits::transfer(alloc(), slot(), s); + } + + struct construct_tag_t + { + }; + template + node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args) : + alloc_(a) + { + PolicyTraits::construct(alloc(), slot(), std::forward(args)...); + } + + void destroy() + { + if (!empty()) + { + PolicyTraits::destroy(alloc(), slot()); + reset(); + } + } + + void reset() + { + assert(alloc_.has_value()); + alloc_ = absl::nullopt; + } + + slot_type* slot() const + { + assert(!empty()); + return reinterpret_cast(std::addressof(slot_space_)); + } + allocator_type* alloc() + { + return std::addressof(*alloc_); + } + + private: + absl::optional alloc_ = {}; + alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {}; + }; + + // For sets. 
+ template + class node_handle : public node_handle_base + { + using Base = node_handle_base; + + public: + using value_type = typename PolicyTraits::value_type; + + constexpr node_handle() + { + } + + value_type& value() const + { + return PolicyTraits::element(this->slot()); + } + + private: + friend struct CommonAccess; + + using Base::Base; + }; + + // For maps. + template + class node_handle> : public node_handle_base + { + using Base = node_handle_base; + using slot_type = typename PolicyTraits::slot_type; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + + constexpr node_handle() + { + } + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key. Otherwise, we provide const access. + auto key() const + -> decltype(PolicyTraits::mutable_key(std::declval())) + { + return PolicyTraits::mutable_key(this->slot()); + } + + mapped_type& mapped() const + { + return PolicyTraits::value(&PolicyTraits::element(this->slot())); + } + + private: + friend struct CommonAccess; + + using Base::Base; + }; + + // Provide access to non-public node-handle functions. + struct CommonAccess + { + template + static auto GetSlot(const Node& node) -> decltype(node.slot()) + { + return node.slot(); + } + + template + static void Destroy(Node* node) + { + node->destroy(); + } + + template + static void Reset(Node* node) + { + node->reset(); + } + + template + static T Transfer(Args&&... args) + { + return T(typename T::transfer_tag_t{}, std::forward(args)...); + } + + template + static T Construct(Args&&... args) + { + return T(typename T::construct_tag_t{}, std::forward(args)...); + } + }; + + // Implement the insert_return_type<> concept of C++17. 
+ template + struct InsertReturnType + { + Iterator position; + bool inserted; + NodeType node; + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_COMMON_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/common_policy_traits.h b/CAPI/cpp/grpc/include/absl/container/internal/common_policy_traits.h new file mode 100644 index 00000000..c498dea1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/common_policy_traits.h @@ -0,0 +1,146 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Defines how slots are initialized/destroyed/moved. + template + struct common_policy_traits + { + // The actual object stored in the container. + using slot_type = typename Policy::slot_type; + using reference = decltype(Policy::element(std::declval())); + using value_type = typename std::remove_reference::type; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... 
args) + { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) + { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. + // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) + { + transfer_impl(alloc, new_slot, old_slot, Rank0{}); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + // Note: we use remove_const_t so that the two overloads have different args + // in the case of sets with explicitly const value_types. + template + static auto element(absl::remove_const_t* slot) + -> decltype(P::element(slot)) + { + return P::element(slot); + } + template + static auto element(const slot_type* slot) -> decltype(P::element(slot)) + { + return P::element(slot); + } + + static constexpr bool transfer_uses_memcpy() + { + return std::is_same>(nullptr, nullptr, nullptr, Rank0{})), std::true_type>::value; + } + + private: + // To rank the overloads below for overload resolution. Rank0 is preferred. + struct Rank2 + { + }; + struct Rank1 : Rank2 + { + }; + struct Rank0 : Rank1 + { + }; + + // Use auto -> decltype as an enabler. + template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, Rank0) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) + { + P::transfer(alloc, new_slot, old_slot); + } +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + // This overload returns true_type for the trait below. 
+ // The conditional_t is to make the enabler type dependent. + template>::value>> + static std::true_type transfer_impl(Alloc*, slot_type* new_slot, slot_type* old_slot, Rank1) + { + // TODO(b/247130232): remove casts after fixing warnings. + // TODO(b/251814870): remove casts after fixing warnings. + std::memcpy( + static_cast(std::launder( + const_cast*>(&element(new_slot)) + )), + static_cast(&element(old_slot)), + sizeof(value_type) + ); + return {}; + } +#endif + + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, Rank2) + { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h b/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h new file mode 100644 index 00000000..986c8170 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h @@ -0,0 +1,321 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Helper class to perform the Empty Base Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the optimization. If all types in Ts are empty +// classes, then CompressedTuple is itself an empty class. 
+// +// To access the members, use member get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// https://en.cppreference.com/w/cpp/language/ebo + +#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ +#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ + +#include +#include +#include +#include + +#include "absl/utility/utility.h" + +#if defined(_MSC_VER) && !defined(__NVCC__) +// We need to mark these classes with this declspec to ensure that +// CompressedTuple happens. +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases) +#else +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class CompressedTuple; + + namespace internal_compressed_tuple + { + + template + struct Elem; + template + struct Elem, I> : std::tuple_element> + { + }; + template + using ElemT = typename Elem::type; + + // We can't use EBCO on other CompressedTuples because that would mean that we + // derive from multiple Storage<> instantiations with the same I parameter, + // and potentially from multiple identical Storage<> instantiations. So anytime + // we use type inheritance rather than encapsulation, we mark + // CompressedTupleImpl, to make this easy to detect. + struct uses_inheritance + { + }; + + template + constexpr bool ShouldUseBase() + { + return std::is_class::value && std::is_empty::value && + !std::is_final::value && + !std::is_base_of::value; + } + + // The storage class provides two specializations: + // - For empty classes, it stores T as a base class. + // - For everything else, it stores T as a member. 
+ template()> + struct Storage + { + T value; + constexpr Storage() = default; + template + explicit constexpr Storage(absl::in_place_t, V&& v) : + value(absl::forward(v)) + { + } + constexpr const T& get() const& + { + return value; + } + T& get() & + { + return value; + } + constexpr const T&& get() const&& + { + return absl::move(*this).value; + } + T&& get() && + { + return std::move(*this).value; + } + }; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T + { + constexpr Storage() = default; + + template + explicit constexpr Storage(absl::in_place_t, V&& v) : + T(absl::forward(v)) + { + } + + constexpr const T& get() const& + { + return *this; + } + T& get() & + { + return *this; + } + constexpr const T&& get() const&& + { + return absl::move(*this); + } + T&& get() && + { + return std::move(*this); + } + }; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, + absl::index_sequence, + ShouldAnyUseBase> + // We use the dummy identity function through std::integral_constant to + // convince MSVC of accepting and expanding I in that context. Without it + // you would get: + // error C3548: 'I': parameter pack cannot be used in this context + : uses_inheritance, Storage::value>... + { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) : + Storage(absl::in_place, absl::forward(args))... + { + } + friend CompressedTuple; + }; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, + absl::index_sequence, + false> + // We use the dummy identity function as above... + : Storage::value, false>... + { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) : + Storage(absl::in_place, absl::forward(args))... 
+ { + } + friend CompressedTuple; + }; + + std::false_type Or(std::initializer_list); + std::true_type Or(std::initializer_list); + + // MSVC requires this to be done separately rather than within the declaration + // of CompressedTuple below. + template + constexpr bool ShouldAnyUseBase() + { + return decltype(Or({std::integral_constant()>()...})){}; + } + + template + using TupleElementMoveConstructible = + typename std::conditional::value, std::is_convertible, std::is_constructible>::type; + + template + struct TupleMoveConstructible : std::false_type + { + }; + + template + struct TupleMoveConstructible, Vs...> : std::integral_constant...>::value> + { + }; + + template + struct compressed_tuple_size; + + template + struct compressed_tuple_size> : public std::integral_constant + { + }; + + template + struct TupleItemsMoveConstructible : std::integral_constant::value == sizeof...(Vs), T, Vs...>::value> + { + }; + + } // namespace internal_compressed_tuple + + // Helper class to perform the Empty Base Class Optimization. + // Ts can contain classes and non-classes, empty or not. For the ones that + // are empty classes, we perform the CompressedTuple. If all types in Ts are + // empty classes, then CompressedTuple is itself an empty class. (This + // does not apply when one or more of those empty classes is itself an empty + // CompressedTuple.) + // + // To access the members, use member .get() function. + // + // Eg: + // absl::container_internal::CompressedTuple value(7, t1, t2, + // t3); + // assert(value.get<0>() == 7); + // T1& t1 = value.get<1>(); + // const T2& t2 = value.get<2>(); + // ... 
+ // + // https://en.cppreference.com/w/cpp/language/ebo + template + class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple : private internal_compressed_tuple::CompressedTupleImpl, absl::index_sequence_for, internal_compressed_tuple::ShouldAnyUseBase()> + { + private: + template + using ElemT = internal_compressed_tuple::ElemT; + + template + using StorageT = internal_compressed_tuple::Storage, I>; + + public: + // There seems to be a bug in MSVC dealing in which using '=default' here will + // cause the compiler to ignore the body of other constructors. The work- + // around is to explicitly implement the default constructor. +#if defined(_MSC_VER) + constexpr CompressedTuple() : + CompressedTuple::CompressedTupleImpl() + { + } +#else + constexpr CompressedTuple() = default; +#endif + explicit constexpr CompressedTuple(const Ts&... base) : + CompressedTuple::CompressedTupleImpl(absl::in_place, base...) + { + } + + template)>>, + internal_compressed_tuple::TupleItemsMoveConstructible, First, Vs...>>::value, + bool> = true> + explicit constexpr CompressedTuple(First&& first, Vs&&... base) : + CompressedTuple::CompressedTupleImpl(absl::in_place, absl::forward(first), absl::forward(base)...) + { + } + + template + ElemT& get() & + { + return StorageT::get(); + } + + template + constexpr const ElemT& get() const& + { + return StorageT::get(); + } + + template + ElemT&& get() && + { + return std::move(*this).StorageT::get(); + } + + template + constexpr const ElemT&& get() const&& + { + return absl::move(*this).StorageT::get(); + } + }; + + // Explicit specialization for a zero-element tuple + // (needed to avoid ambiguous overloads for the default constructor). 
+ template<> + class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> + { + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + +#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h b/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h new file mode 100644 index 00000000..91505317 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h @@ -0,0 +1,510 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ +#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#include +#endif + +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#include +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + struct alignas(Alignment) AlignedType + { + }; + + // Allocates at least n bytes aligned to the specified alignment. + // Alignment must be a power of 2. It must be positive. 
+ // + // Note that many allocators don't honor alignment requirements above certain + // threshold (usually either alignof(std::max_align_t) or alignof(void*)). + // Allocate() doesn't apply alignment corrections. If the underlying allocator + // returns insufficiently alignment pointer, that's what you are going to get. + template + void* Allocate(Alloc* alloc, size_t n) + { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); + assert(reinterpret_cast(p) % Alignment == 0 && "allocator does not respect alignment"); + return p; + } + + // The pointer must have been previously obtained by calling + // Allocate(alloc, n). + template + void Deallocate(Alloc* alloc, void* p, size_t n) + { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + AT::deallocate(my_mem_alloc, static_cast(p), (n + sizeof(M) - 1) / sizeof(M)); + } + + namespace memory_internal + { + + // Constructs T into uninitialized storage pointed by `ptr` using the args + // specified in the tuple. 
+ template + void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, absl::index_sequence) + { + absl::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))... + ); + } + + template + struct WithConstructedImplF + { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args + ) const + { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; + }; + + template + decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, absl::index_sequence, F&& f + ) + { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))... + ); + } + + template + auto TupleRefImpl(T&& t, absl::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) + { + return std::forward_as_tuple(std::get(std::forward(t))...); + } + + // Returns a tuple of references to the elements of the input tuple. T must be a + // tuple. + template + auto TupleRef(T&& t) -> decltype(TupleRefImpl(std::forward(t), absl::make_index_sequence::type>::value>())) + { + return TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>() + ); + } + + template + decltype(std::declval()(std::declval(), std::piecewise_construct, std::declval>(), std::declval())) + DecomposePairImpl(F&& f, std::pair, V> p) + { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), std::move(p.second)); + } + + } // namespace memory_internal + + // Constructs T into uninitialized storage pointed by `ptr` using the args + // specified in the tuple. + template + void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) + { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), absl::make_index_sequence::type>::value>() + ); + } + + // Constructs T using the args specified in the tuple and calls F with the + // constructed value. 
+ template + decltype(std::declval()(std::declval())) WithConstructed( + Tuple&& t, F&& f + ) + { + return memory_internal::WithConstructedImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f) + ); + } + + // Given arguments of an std::pair's constructor, PairArgs() returns a pair of + // tuples with references to the passed arguments. The tuples contain + // constructor arguments for the first and the second elements of the pair. + // + // The following two snippets are equivalent. + // + // 1. std::pair p(args...); + // + // 2. auto a = PairArgs(args...); + // std::pair p(std::piecewise_construct, + // std::move(a.first), std::move(a.second)); + inline std::pair, std::tuple<>> PairArgs() + { + return {}; + } + template + std::pair, std::tuple> PairArgs(F&& f, S&& s) + { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), std::forward_as_tuple(std::forward(s))}; + } + template + std::pair, std::tuple> PairArgs( + const std::pair& p + ) + { + return PairArgs(p.first, p.second); + } + template + std::pair, std::tuple> PairArgs(std::pair&& p) + { + return PairArgs(std::forward(p.first), std::forward(p.second)); + } + template + auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), memory_internal::TupleRef(std::forward(s)))) + { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), memory_internal::TupleRef(std::forward(s))); + } + + // A helper function for implementing apply() in map policies. + template + auto DecomposePair(F&& f, Args&&... args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...) + )) + { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...) + ); + } + + // A helper function for implementing apply() in set policies. 
+ template + decltype(std::declval()(std::declval(), std::declval())) + DecomposeValue(F&& f, Arg&& arg) + { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); + } + + // Helper functions for asan and msan. + inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) + { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(m, s); +#endif +#ifdef ABSL_HAVE_MEMORY_SANITIZER + __msan_poison(m, s); +#endif + (void)m; + (void)s; + } + + inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) + { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(m, s); +#endif +#ifdef ABSL_HAVE_MEMORY_SANITIZER + __msan_unpoison(m, s); +#endif + (void)m; + (void)s; + } + + template + inline void SanitizerPoisonObject(const T* object) + { + SanitizerPoisonMemoryRegion(object, sizeof(T)); + } + + template + inline void SanitizerUnpoisonObject(const T* object) + { + SanitizerUnpoisonMemoryRegion(object, sizeof(T)); + } + + namespace memory_internal + { + + // If Pair is a standard-layout type, OffsetOf::kFirst and + // OffsetOf::kSecond are equivalent to offsetof(Pair, first) and + // offsetof(Pair, second) respectively. Otherwise they are -1. + // + // The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout + // type, which is non-portable. + template + struct OffsetOf + { + static constexpr size_t kFirst = static_cast(-1); + static constexpr size_t kSecond = static_cast(-1); + }; + + template + struct OffsetOf::type> + { + static constexpr size_t kFirst = offsetof(Pair, first); + static constexpr size_t kSecond = offsetof(Pair, second); + }; + + template + struct IsLayoutCompatible + { + private: + struct Pair + { + K first; + V second; + }; + + // Is P layout-compatible with Pair? + template + static constexpr bool LayoutCompatible() + { + return std::is_standard_layout

() && sizeof(P) == sizeof(Pair) && + alignof(P) == alignof(Pair) && + memory_internal::OffsetOf

::kFirst == + memory_internal::OffsetOf::kFirst && + memory_internal::OffsetOf

::kSecond == + memory_internal::OffsetOf::kSecond; + } + + public: + // Whether pair and pair are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout() && + std::is_standard_layout() && + memory_internal::OffsetOf::kFirst == 0 && + LayoutCompatible>() && + LayoutCompatible>(); + }; + + } // namespace memory_internal + + // The internal storage type for key-value containers like flat_hash_map. + // + // It is convenient for the value_type of a flat_hash_map to be + // pair; the "const K" prevents accidental modification of the key + // when dealing with the reference returned from find() and similar methods. + // However, this creates other problems; we want to be able to emplace(K, V) + // efficiently with move operations, and similarly be able to move a + // pair in insert(). + // + // The solution is this union, which aliases the const and non-const versions + // of the pair. This also allows flat_hash_map to work, even though + // that has the same efficiency issues with move in emplace() and insert() - + // but people do it anyway. + // + // If kMutableKeys is false, only the value member can be accessed. + // + // If kMutableKeys is true, key can be accessed through all slots while value + // and mutable_value must be accessed only via INITIALIZED slots. Slots are + // created and destroyed via mutable_value so that the key can be moved later. + // + // Accessing one of the union fields while the other is active is safe as + // long as they are layout-compatible, which is guaranteed by the definition of + // kMutableKeys. 
For C++11, the relevant section of the standard is + // https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19) + template + union map_slot_type + { + map_slot_type() + { + } + ~map_slot_type() = delete; + using value_type = std::pair; + using mutable_value_type = + std::pair, absl::remove_const_t>; + + value_type value; + mutable_value_type mutable_value; + absl::remove_const_t key; + }; + + template + struct map_slot_policy + { + using slot_type = map_slot_type; + using value_type = std::pair; + using mutable_value_type = + std::pair, absl::remove_const_t>; + + private: + static void emplace(slot_type* slot) + { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair and pair are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = memory_internal::IsLayoutCompatible; + + public: + static value_type& element(slot_type* slot) + { + return slot->value; + } + static const value_type& element(const slot_type* slot) + { + return slot->value; + } + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + static K& mutable_key(slot_type* slot) + { + // Still check for kMutableKeys so that we can avoid calling std::launder + // unless necessary because it can interfere with optimizations. + return kMutableKeys::value ? slot->key : *std::launder(const_cast(std::addressof(slot->value.first))); + } +#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606) + static const K& mutable_key(slot_type* slot) + { + return key(slot); + } +#endif + + static const K& key(const slot_type* slot) + { + return kMutableKeys::value ? 
slot->key : slot->value.first; + } + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) + { + emplace(slot); + if (kMutableKeys::value) + { + absl::allocator_traits::construct(*alloc, &slot->mutable_value, std::forward(args)...); + } + else + { + absl::allocator_traits::construct(*alloc, &slot->value, std::forward(args)...); + } + } + + // Construct this slot by moving from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) + { + emplace(slot); + if (kMutableKeys::value) + { + absl::allocator_traits::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value) + ); + } + else + { + absl::allocator_traits::construct(*alloc, &slot->value, std::move(other->value)); + } + } + + // Construct this slot by copying from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, const slot_type* other) + { + emplace(slot); + absl::allocator_traits::construct(*alloc, &slot->value, other->value); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) + { + if (kMutableKeys::value) + { + absl::allocator_traits::destroy(*alloc, &slot->mutable_value); + } + else + { + absl::allocator_traits::destroy(*alloc, &slot->value); + } + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) + { + emplace(new_slot); +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + if (absl::is_trivially_relocatable()) + { + // TODO(b/247130232,b/251814870): remove casts after fixing warnings. 
+ std::memcpy(static_cast(std::launder(&new_slot->value)), static_cast(&old_slot->value), sizeof(value_type)); + return; + } +#endif + + if (kMutableKeys::value) + { + absl::allocator_traits::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value) + ); + } + else + { + absl::allocator_traits::construct(*alloc, &new_slot->value, std::move(old_slot->value)); + } + destroy(alloc, old_slot); + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h b/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h new file mode 100644 index 00000000..01c5dd3a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h @@ -0,0 +1,144 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ +#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // This is a stateful allocator, but the state lives outside of the + // allocator (in whatever test is using the allocator). 
This is odd + // but helps in tests where the allocator is propagated into nested + // containers - that chain of allocators uses the same state and is + // thus easier to query for aggregate allocation information. + template + class CountingAllocator + { + public: + using Allocator = std::allocator; + using AllocatorTraits = std::allocator_traits; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + + CountingAllocator() = default; + explicit CountingAllocator(int64_t* bytes_used) : + bytes_used_(bytes_used) + { + } + CountingAllocator(int64_t* bytes_used, int64_t* instance_count) : + bytes_used_(bytes_used), + instance_count_(instance_count) + { + } + + template + CountingAllocator(const CountingAllocator& x) : + bytes_used_(x.bytes_used_), + instance_count_(x.instance_count_) + { + } + + pointer allocate( + size_type n, + typename AllocatorTraits::const_void_pointer hint = nullptr + ) + { + Allocator allocator; + pointer ptr = AllocatorTraits::allocate(allocator, n, hint); + if (bytes_used_ != nullptr) + { + *bytes_used_ += n * sizeof(T); + } + return ptr; + } + + void deallocate(pointer p, size_type n) + { + Allocator allocator; + AllocatorTraits::deallocate(allocator, p, n); + if (bytes_used_ != nullptr) + { + *bytes_used_ -= n * sizeof(T); + } + } + + template + void construct(U* p, Args&&... args) + { + Allocator allocator; + AllocatorTraits::construct(allocator, p, std::forward(args)...); + if (instance_count_ != nullptr) + { + *instance_count_ += 1; + } + } + + template + void destroy(U* p) + { + Allocator allocator; + // Ignore GCC warning bug. 
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuse-after-free" +#endif + AllocatorTraits::destroy(allocator, p); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif + if (instance_count_ != nullptr) + { + *instance_count_ -= 1; + } + } + + template + class rebind + { + public: + using other = CountingAllocator; + }; + + friend bool operator==(const CountingAllocator& a, const CountingAllocator& b) + { + return a.bytes_used_ == b.bytes_used_ && + a.instance_count_ == b.instance_count_; + } + + friend bool operator!=(const CountingAllocator& a, const CountingAllocator& b) + { + return !(a == b); + } + + int64_t* bytes_used_ = nullptr; + int64_t* instance_count_ = nullptr; + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h new file mode 100644 index 00000000..7189b8ec --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h @@ -0,0 +1,257 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Define the default Hash and Eq functions for SwissTable containers. 
+// +// std::hash and std::equal_to are not appropriate hash and equal +// functions for SwissTable containers. There are two reasons for this. +// +// SwissTable containers are power of 2 sized containers: +// +// This means they use the lower bits of the hash value to find the slot for +// each entry. The typical hash function for integral types is the identity. +// This is a very weak hash function for SwissTable and any power of 2 sized +// hashtable implementation which will lead to excessive collisions. For +// SwissTable we use murmur3 style mixing to reduce collisions to a minimum. +// +// SwissTable containers support heterogeneous lookup: +// +// In order to make heterogeneous lookup work, hash and equal functions must be +// polymorphic. At the same time they have to satisfy the same requirements the +// C++ standard imposes on hash functions and equality operators. That is: +// +// if hash_default_eq(a, b) returns true for any a and b of type T, then +// hash_default_hash(a) must equal hash_default_hash(b) +// +// For SwissTable containers this requirement is relaxed to allow a and b of +// any and possibly different types. Note that like the standard the hash and +// equal functions are still bound to T. This is important because some type U +// can be hashed by/tested for equality differently depending on T. A notable +// example is `const char*`. `const char*` is treated as a c-style string when +// the hash function is hash but as a pointer when the hash +// function is hash. 
+// +#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/hash/hash.h" +#include "absl/strings/cord.h" +#include "absl/strings/string_view.h" + +#ifdef ABSL_HAVE_STD_STRING_VIEW +#include +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // The hash of an object of type T is computed by using absl::Hash. + template + struct HashEq + { + using Hash = absl::Hash; + using Eq = std::equal_to; + }; + + struct StringHash + { + using is_transparent = void; + + size_t operator()(absl::string_view v) const + { + return absl::Hash{}(v); + } + size_t operator()(const absl::Cord& v) const + { + return absl::Hash{}(v); + } + }; + + struct StringEq + { + using is_transparent = void; + bool operator()(absl::string_view lhs, absl::string_view rhs) const + { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const + { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, absl::string_view rhs) const + { + return lhs == rhs; + } + bool operator()(absl::string_view lhs, const absl::Cord& rhs) const + { + return lhs == rhs; + } + }; + + // Supports heterogeneous lookup for string-like elements. 
+ struct StringHashEq + { + using Hash = StringHash; + using Eq = StringEq; + }; + + template<> + struct HashEq : StringHashEq + { + }; + template<> + struct HashEq : StringHashEq + { + }; + template<> + struct HashEq : StringHashEq + { + }; + +#ifdef ABSL_HAVE_STD_STRING_VIEW + + template + struct BasicStringHash + { + using is_transparent = void; + + size_t operator()(std::basic_string_view v) const + { + return absl::Hash>{}(v); + } + }; + + template + struct BasicStringEq + { + using is_transparent = void; + bool operator()(std::basic_string_view lhs, std::basic_string_view rhs) const + { + return lhs == rhs; + } + }; + + // Supports heterogeneous lookup for w/u16/u32 string + string_view + char*. + template + struct BasicStringHashEq + { + using Hash = BasicStringHash; + using Eq = BasicStringEq; + }; + + template<> + struct HashEq : BasicStringHashEq + { + }; + template<> + struct HashEq : BasicStringHashEq + { + }; + template<> + struct HashEq : BasicStringHashEq + { + }; + template<> + struct HashEq : BasicStringHashEq + { + }; + template<> + struct HashEq : BasicStringHashEq + { + }; + template<> + struct HashEq : BasicStringHashEq + { + }; + +#endif // ABSL_HAVE_STD_STRING_VIEW + + // Supports heterogeneous lookup for pointers and smart pointers. 
+ template + struct HashEq + { + struct Hash + { + using is_transparent = void; + template + size_t operator()(const U& ptr) const + { + return absl::Hash{}(HashEq::ToPtr(ptr)); + } + }; + struct Eq + { + using is_transparent = void; + template + bool operator()(const A& a, const B& b) const + { + return HashEq::ToPtr(a) == HashEq::ToPtr(b); + } + }; + + private: + static const T* ToPtr(const T* ptr) + { + return ptr; + } + template + static const T* ToPtr(const std::unique_ptr& ptr) + { + return ptr.get(); + } + template + static const T* ToPtr(const std::shared_ptr& ptr) + { + return ptr.get(); + } + }; + + template + struct HashEq> : HashEq + { + }; + template + struct HashEq> : HashEq + { + }; + + // This header's visibility is restricted. If you need to access the default + // hasher please use the container's ::hasher alias instead. + // + // Example: typename Hash = typename absl::flat_hash_map::hasher + template + using hash_default_hash = typename container_internal::HashEq::Hash; + + // This header's visibility is restricted. If you need to access the default + // key equal please use the container's ::key_equal alias instead. + // + // Example: typename Eq = typename absl::flat_hash_map::key_equal + template + using hash_default_eq = typename container_internal::HashEq::Eq; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h new file mode 100644 index 00000000..a5657481 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h @@ -0,0 +1,209 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates random values for testing. Specialized only for the few types we +// care about. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + namespace hash_internal + { + namespace generator_internal + { + + template + struct IsMap : std::false_type + { + }; + + template + struct IsMap> : std::true_type + { + }; + + } // namespace generator_internal + + std::mt19937_64* GetSharedRng(); + + enum Enum + { + kEnumEmpty, + kEnumDeleted, + }; + + enum class EnumClass : uint64_t + { + kEmpty, + kDeleted, + }; + + inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) + { + return o << static_cast(ec); + } + + template + struct Generator; + + template + struct Generator::value>::type> + { + T operator()() const + { + std::uniform_int_distribution dist; + return dist(*GetSharedRng()); + } + }; + + template<> + struct Generator + { + Enum operator()() const + { + std::uniform_int_distribution::type> + dist; + while (true) + { + auto variate = dist(*GetSharedRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast(variate); + } + } + }; + + template<> + struct Generator + { + EnumClass operator()() 
const + { + std::uniform_int_distribution< + typename std::underlying_type::type> + dist; + while (true) + { + EnumClass variate = static_cast(dist(*GetSharedRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast(variate); + } + } + }; + + template<> + struct Generator + { + std::string operator()() const; + }; + + template<> + struct Generator + { + absl::string_view operator()() const; + }; + + template<> + struct Generator + { + NonStandardLayout operator()() const + { + return NonStandardLayout(Generator()()); + } + }; + + template + struct Generator> + { + std::pair operator()() const + { + return std::pair(Generator::type>()(), Generator::type>()()); + } + }; + + template + struct Generator> + { + std::tuple operator()() const + { + return std::tuple(Generator::type>()()...); + } + }; + + template + struct Generator> + { + std::unique_ptr operator()() const + { + return absl::make_unique(Generator()()); + } + }; + + template + struct Generator().key()), decltype(std::declval().value())>> : Generator().key())>::type, typename std::decay().value())>::type>> + { + }; + + template + using GeneratedType = decltype(std::declval::value, typename Container::value_type, typename Container::key_type>::type>&>()()); + + // Naive wrapper that performs a linear search of previous values. + // Beware this is O(SQR), which is reasonable for smaller kMaxValues. 
+ template + struct UniqueGenerator + { + Generator gen; + std::vector values; + + T operator()() + { + assert(values.size() < kMaxValues); + for (;;) + { + T value = gen(); + if (std::find(values.begin(), values.end(), value) == values.end()) + { + values.push_back(value); + return value; + } + } + } + }; + + } // namespace hash_internal + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h new file mode 100644 index 00000000..5577ab7f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h @@ -0,0 +1,240 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Utilities to help tests verify that hash tables properly handle stateful +// allocators and hash functions. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + namespace hash_testing_internal + { + + template + struct WithId + { + WithId() : + id_(next_id()) + { + } + WithId(const WithId& that) : + id_(that.id_) + { + } + WithId(WithId&& that) : + id_(that.id_) + { + that.id_ = 0; + } + WithId& operator=(const WithId& that) + { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) + { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const + { + return id_; + } + + friend bool operator==(const WithId& a, const WithId& b) + { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) + { + return !(a == b); + } + + protected: + explicit WithId(size_t id) : + id_(id) + { + } + + private: + size_t id_; + + template + static size_t next_id() + { + // 0 is reserved for moved from state. 
+ static size_t gId = 1; + return gId++; + } + }; + + } // namespace hash_testing_internal + + struct NonStandardLayout + { + NonStandardLayout() + { + } + explicit NonStandardLayout(std::string s) : + value(std::move(s)) + { + } + virtual ~NonStandardLayout() + { + } + + friend bool operator==(const NonStandardLayout& a, const NonStandardLayout& b) + { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, const NonStandardLayout& b) + { + return a.value != b.value; + } + + template + friend H AbslHashValue(H h, const NonStandardLayout& v) + { + return H::combine(std::move(h), v.value); + } + + std::string value; + }; + + struct StatefulTestingHash : absl::container_internal::hash_testing_internal::WithId + { + template + size_t operator()(const T& t) const + { + return absl::Hash{}(t); + } + }; + + struct StatefulTestingEqual : absl::container_internal::hash_testing_internal::WithId + { + template + bool operator()(const T& t, const U& u) const + { + return t == u; + } + }; + + // It is expected that Alloc() == Alloc() for all allocators so we cannot use + // WithId base. We need to explicitly assign ids. + template + struct Alloc : std::allocator + { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. 
+ explicit Alloc(size_t id = 0) : + id_(id) + { + } + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template + Alloc(const Alloc& that) : + std::allocator(that), + id_(that.id()) + { + } + + template + struct rebind + { + using other = Alloc; + }; + + size_t id() const + { + return id_; + } + + friend bool operator==(const Alloc& a, const Alloc& b) + { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) + { + return !(a == b); + } + + private: + size_t id_ = (std::numeric_limits::max)(); + }; + + template + auto items(const Map& m) -> std::vector< + std::pair> + { + using std::get; + std::vector> res; + res.reserve(m.size()); + for (const auto& v : m) + res.emplace_back(get<0>(v), get<1>(v)); + return res; + } + + template + auto keys(const Set& s) + -> std::vector::type> + { + std::vector::type> res; + res.reserve(s.size()); + for (const auto& v : s) + res.emplace_back(v); + return res; + } + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions +// where the unordered containers are missing certain constructors that +// take allocator arguments. This test is defined ad-hoc for the platforms +// we care about (notably Crosstool 17) because libstdcxx's useless +// versioning scheme precludes a more principled solution. 
+// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) +// "the unordered associative containers in and +// meet the allocator-aware container requirements;" +#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425) || \ + (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9)) +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 +#else +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h new file mode 100644 index 00000000..0208f211 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h @@ -0,0 +1,171 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ + +#include +#include +#include +#include +#include + +#include "absl/container/internal/common_policy_traits.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Defines how slots are initialized/destroyed/moved. + template + struct hash_policy_traits : common_policy_traits + { + // The type of the keys stored in the hashtable. 
+ using key_type = typename Policy::key_type; + + private: + struct ReturnKey + { + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + template::value, int> = 0> + static key_type& Impl(Key&& k, int) + { + return *std::launder( + const_cast(std::addressof(std::forward(k))) + ); + } +#endif + + template + static Key Impl(Key&& k, char) + { + return std::forward(k); + } + + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template + auto operator()(Key&& k, const Args&...) const + -> decltype(Impl(std::forward(k), 0)) + { + return Impl(std::forward(k), 0); + } + }; + + template + struct ConstantIteratorsImpl : std::false_type + { + }; + + template + struct ConstantIteratorsImpl> : P::constant_iterators + { + }; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. + using init_type = typename Policy::init_type; + + using reference = decltype(Policy::element(std::declval())); + using pointer = typename std::remove_reference::type*; + using value_type = typename std::remove_reference::type; + + // Policies can set this variable to tell raw_hash_set that all iterators + // should be constant, even `iterator`. This is useful for set-like + // containers. + // Defaults to false if not provided by the policy. + using constant_iterators = ConstantIteratorsImpl<>; + + // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. 
+ // + // If `slot` is nullptr, returns the constant amount of memory owned by any + // full slot or -1 if slots own variable amounts of memory. + // + // PRECONDITION: `slot` is INITIALIZED or nullptr + template + static size_t space_used(const slot_type* slot) + { + return P::space_used(slot); + } + + // Provides generalized access to the key for elements, both for elements in + // the table and for elements that have not yet been inserted (or even + // constructed). We would like an API that allows us to say: `key(args...)` + // but we cannot do that for all cases, so we use this more general API that + // can be used for many things, including the following: + // + // - Given an element in a table, get its key. + // - Given an element initializer, get its key. + // - Given `emplace()` arguments, get the element key. + // + // Implementations of this must adhere to a very strict technical + // specification around aliasing and consuming arguments: + // + // Let `value_type` be the result type of `element()` without ref- and + // cv-qualifiers. The first argument is a functor, the rest are constructor + // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where + // `k` is the element key, and `xs...` are the new constructor arguments for + // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias + // `ts...`. The key won't be touched once `xs...` are used to construct an + // element; `ts...` won't be touched at all, which allows `apply()` to consume + // any rvalues among them. + // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. 
A compile error is not allowed, SFINAE or not. + template + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward(f), std::forward(ts)...)) + { + return P::apply(std::forward(f), std::forward(ts)...); + } + + // Returns the "key" portion of the slot. + // Used for node handle manipulation. + template + static auto mutable_key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), hash_policy_traits::element(slot))) + { + return P::apply(ReturnKey(), hash_policy_traits::element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template + static auto value(T* elem) -> decltype(P::value(elem)) + { + return P::value(elem); + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h new file mode 100644 index 00000000..6da7252c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h @@ -0,0 +1,122 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This library provides APIs to debug the probing behavior of hash tables. 
+// +// In general, the probing behavior is a black box for users and only the +// side effects can be measured in the form of performance differences. +// These APIs give a glimpse on the actual behavior of the probing algorithms in +// these hashtables given a specified hash function and a set of elements. +// +// The probe count distribution can be used to assess the quality of the hash +// function for that particular hash table. Note that a hash function that +// performs well in one hash table implementation does not necessarily performs +// well in a different one. +// +// This library supports std::unordered_{set,map}, dense_hash_{set,map} and +// absl::{flat,node,string}_hash_{set,map}. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ + +#include +#include +#include +#include + +#include "absl/container/internal/hashtable_debug_hooks.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Returns the number of probes required to lookup `key`. Returns 0 for a + // search with no collisions. Higher values mean more hash collisions occurred; + // however, the exact meaning of this number varies according to the container + // type. + template + size_t GetHashtableDebugNumProbes( + const C& c, const typename C::key_type& key + ) + { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::GetNumProbes(c, key); + } + + // Gets a histogram of the number of probes for each elements in the container. + // The sum of all the values in the vector is equal to container.size(). 
+ template + std::vector GetHashtableDebugNumProbesHistogram(const C& container) + { + std::vector v; + for (auto it = container.begin(); it != container.end(); ++it) + { + size_t num_probes = GetHashtableDebugNumProbes( + container, + absl::container_internal::hashtable_debug_internal::GetKey(*it, 0) + ); + v.resize((std::max)(v.size(), num_probes + 1)); + v[num_probes]++; + } + return v; + } + + struct HashtableDebugProbeSummary + { + size_t total_elements; + size_t total_num_probes; + double mean; + }; + + // Gets a summary of the probe count distribution for the elements in the + // container. + template + HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) + { + auto probes = GetHashtableDebugNumProbesHistogram(container); + HashtableDebugProbeSummary summary = {}; + for (size_t i = 0; i < probes.size(); ++i) + { + summary.total_elements += probes[i]; + summary.total_num_probes += probes[i] * i; + } + summary.mean = 1.0 * summary.total_num_probes / summary.total_elements; + return summary; + } + + // Returns the number of bytes requested from the allocator by the container + // and not freed. + template + size_t AllocatedByteSize(const C& c) + { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::AllocatedByteSize(c); + } + + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` + // and `c.size()` is equal to `num_elements`. 
+ template + size_t LowerBoundAllocatedByteSize(size_t num_elements) + { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); + } + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h new file mode 100644 index 00000000..4f0d2114 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h @@ -0,0 +1,95 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Provides the internal API for hashtable_debug.h. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ + +#include + +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + namespace hashtable_debug_internal + { + + // If it is a map, call get<0>(). + using std::get; + template + auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) + { + return get<0>(pair); + } + + // If it is not a map, return the value directly. 
+ template + const typename T::key_type& GetKey(const typename T::key_type& key, char) + { + return key; + } + + // Containers should specialize this to provide debug information for that + // container. + template + struct HashtableDebugAccess + { + // Returns the number of probes required to find `key` in `c`. The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. + static size_t GetNumProbes(const Container& c, const typename Container::key_type& key) + { + if (!c.bucket_count()) + return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) + { + if (it == e) + return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) + return num_probes; + } + } + + // Returns the number of bytes requested from the allocator by the container + // and not freed. + // + // static size_t AllocatedByteSize(const Container& c); + + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type + // `Container` and `c.size()` is equal to `num_elements`. + // + // static size_t LowerBoundAllocatedByteSize(size_t num_elements); + }; + + } // namespace hashtable_debug_internal + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h new file mode 100644 index 00000000..b68fa102 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h @@ -0,0 +1,316 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: hashtablez_sampler.h +// ----------------------------------------------------------------------------- +// +// This header file defines the API for a low level library to sample hashtables +// and collect runtime statistics about them. +// +// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which +// store information about a single sample. +// +// `Record*` methods store information into samples. +// `Sample()` and `Unsample()` make use of a single global sampler with +// properties controlled by the flags hashtablez_enabled, +// hashtablez_sample_rate, and hashtablez_max_samples. +// +// WARNING +// +// Using this sampling API may cause sampled Swiss tables to use the global +// allocator (operator `new`) in addition to any custom allocator. If you +// are using a table in an unusual circumstance where allocation or calling a +// linux syscall is unacceptable, this could interfere. +// +// This utility is internal-only. Use at your own risk. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/per_thread_tls.h" +#include "absl/base/optimization.h" +#include "absl/profiling/internal/sample_recorder.h" +#include "absl/synchronization/mutex.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Stores information about a sampled hashtable. All mutations to this *must* + // be made through `Record*` functions below. All reads from this *must* only + // occur in the callback to `HashtablezSampler::Iterate`. + struct HashtablezInfo : public profiling_internal::Sample + { + // Constructs the object but does not fill in any fields. + HashtablezInfo(); + ~HashtablezInfo(); + HashtablezInfo(const HashtablezInfo&) = delete; + HashtablezInfo& operator=(const HashtablezInfo&) = delete; + + // Puts the object into a clean state, fills in the logically `const` members, + // blocking for any readers that are currently sampling the object. + void PrepareForSampling(int64_t stride, size_t inline_element_size_value) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + + // These fields are mutated by the various Record* APIs and need to be + // thread-safe. + std::atomic capacity; + std::atomic size; + std::atomic num_erases; + std::atomic num_rehashes; + std::atomic max_probe_length; + std::atomic total_probe_length; + std::atomic hashes_bitwise_or; + std::atomic hashes_bitwise_and; + std::atomic hashes_bitwise_xor; + std::atomic max_reserve; + + // All of the fields below are set by `PrepareForSampling`, they must not be + // mutated in `Record*` functions. They are logically `const` in that sense. + // These are guarded by init_mu, but that is not externalized to clients, + // which can read them only during `SampleRecorder::Iterate` which will hold + // the lock. 
+ static constexpr int kMaxStackDepth = 64; + absl::Time create_time; + int32_t depth; + void* stack[kMaxStackDepth]; + size_t inline_element_size; // How big is the slot? + }; + + void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length); + + void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity); + + void RecordClearedReservationSlow(HashtablezInfo* info); + + void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, size_t capacity); + + void RecordInsertSlow(HashtablezInfo* info, size_t hash, size_t distance_from_desired); + + void RecordEraseSlow(HashtablezInfo* info); + + struct SamplingState + { + int64_t next_sample; + // When we make a sampling decision, we record that distance so we can weight + // each sample. + int64_t sample_stride; + }; + + HashtablezInfo* SampleSlow(SamplingState& next_sample, size_t inline_element_size); + void UnsampleSlow(HashtablezInfo* info); + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + class HashtablezInfoHandle + { + public: + explicit HashtablezInfoHandle() : + info_(nullptr) + { + } + explicit HashtablezInfoHandle(HashtablezInfo* info) : + info_(info) + { + } + + // We do not have a destructor. Caller is responsible for calling Unregister + // before destroying the handle. 
+ void Unregister() + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + UnsampleSlow(info_); + } + + HashtablezInfoHandle(const HashtablezInfoHandle&) = delete; + HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete; + + HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept + : + info_(absl::exchange(o.info_, nullptr)) + { + } + HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept + { + if (ABSL_PREDICT_FALSE(info_ != nullptr)) + { + UnsampleSlow(info_); + } + info_ = absl::exchange(o.info_, nullptr); + return *this; + } + + inline void RecordStorageChanged(size_t size, size_t capacity) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordStorageChangedSlow(info_, size, capacity); + } + + inline void RecordRehash(size_t total_probe_length) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordRehashSlow(info_, total_probe_length); + } + + inline void RecordReservation(size_t target_capacity) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordReservationSlow(info_, target_capacity); + } + + inline void RecordClearedReservation() + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordClearedReservationSlow(info_); + } + + inline void RecordInsert(size_t hash, size_t distance_from_desired) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordInsertSlow(info_, hash, distance_from_desired); + } + + inline void RecordErase() + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordEraseSlow(info_); + } + + friend inline void swap(HashtablezInfoHandle& lhs, HashtablezInfoHandle& rhs) + { + std::swap(lhs.info_, rhs.info_); + } + + private: + friend class HashtablezInfoHandlePeer; + HashtablezInfo* info_; + }; +#else + // Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can + // be removed by the linker, in order to reduce the binary size. 
+ class HashtablezInfoHandle + { + public: + explicit HashtablezInfoHandle() = default; + explicit HashtablezInfoHandle(std::nullptr_t) + { + } + + inline void Unregister() + { + } + inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) + { + } + inline void RecordRehash(size_t /*total_probe_length*/) + { + } + inline void RecordReservation(size_t /*target_capacity*/) + { + } + inline void RecordClearedReservation() + { + } + inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) + { + } + inline void RecordErase() + { + } + + friend inline void swap(HashtablezInfoHandle& /*lhs*/, HashtablezInfoHandle& /*rhs*/) + { + } + }; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + + // Returns an RAII sampling handle that manages registration and unregistation + // with the global sampler. + inline HashtablezInfoHandle Sample( + size_t inline_element_size ABSL_ATTRIBUTE_UNUSED + ) + { +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) + { + return HashtablezInfoHandle(nullptr); + } + return HashtablezInfoHandle( + SampleSlow(global_next_sample, inline_element_size) + ); +#else + return HashtablezInfoHandle(nullptr); +#endif // !ABSL_PER_THREAD_TLS + } + + using HashtablezSampler = + ::absl::profiling_internal::SampleRecorder; + + // Returns a global Sampler. + HashtablezSampler& GlobalHashtablezSampler(); + + using HashtablezConfigListener = void (*)(); + void SetHashtablezConfigListener(HashtablezConfigListener l); + + // Enables or disables sampling for Swiss tables. + bool IsHashtablezEnabled(); + void SetHashtablezEnabled(bool enabled); + void SetHashtablezEnabledInternal(bool enabled); + + // Sets the rate at which Swiss tables will be sampled. 
+ int32_t GetHashtablezSampleParameter(); + void SetHashtablezSampleParameter(int32_t rate); + void SetHashtablezSampleParameterInternal(int32_t rate); + + // Sets a soft max for the number of samples that will be kept. + size_t GetHashtablezMaxSamples(); + void SetHashtablezMaxSamples(size_t max); + void SetHashtablezMaxSamplesInternal(size_t max); + + // Configuration override. + // This allows process-wide sampling without depending on order of + // initialization of static storage duration objects. + // The definition of this constant is weak, which allows us to inject a + // different value for it at link time. + extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h b/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h new file mode 100644 index 00000000..1a55ec2b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h @@ -0,0 +1,1249 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_ +#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/macros.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace inlined_vector_internal + { + +// GCC does not deal very well with the below code +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#endif + + template + using AllocatorTraits = std::allocator_traits; + template + using ValueType = typename AllocatorTraits::value_type; + template + using SizeType = typename AllocatorTraits::size_type; + template + using Pointer = typename AllocatorTraits::pointer; + template + using ConstPointer = typename AllocatorTraits::const_pointer; + template + using SizeType = typename AllocatorTraits::size_type; + template + using DifferenceType = typename AllocatorTraits::difference_type; + template + using Reference = ValueType&; + template + using ConstReference = const ValueType&; + template + using Iterator = Pointer; + template + using ConstIterator = ConstPointer; + template + using ReverseIterator = typename std::reverse_iterator>; + template + using ConstReverseIterator = typename std::reverse_iterator>; + template + using MoveIterator = typename std::move_iterator>; + + template + using IsAtLeastForwardIterator = std::is_convertible< + typename std::iterator_traits::iterator_category, + std::forward_iterator_tag>; + + template + using IsMoveAssignOk = std::is_move_assignable>; + template + using IsSwapOk = absl::type_traits_internal::IsSwappable>; + + template + struct TypeIdentity + { + using type = T; + }; + + // Used for function arguments in template functions to prevent ADL by forcing + 
// callers to explicitly specify the template parameter. + template + using NoTypeDeduction = typename TypeIdentity::type; + + template>::value> + struct DestroyAdapter; + + template + struct DestroyAdapter + { + static void DestroyElements(A& allocator, Pointer destroy_first, SizeType destroy_size) + { + for (SizeType i = destroy_size; i != 0;) + { + --i; + AllocatorTraits::destroy(allocator, destroy_first + i); + } + } + }; + + template + struct DestroyAdapter + { + static void DestroyElements(A& allocator, Pointer destroy_first, SizeType destroy_size) + { + static_cast(allocator); + static_cast(destroy_first); + static_cast(destroy_size); + } + }; + + template + struct Allocation + { + Pointer data = nullptr; + SizeType capacity = 0; + }; + + template) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> + struct MallocAdapter + { + static Allocation Allocate(A& allocator, SizeType requested_capacity) + { + return {AllocatorTraits::allocate(allocator, requested_capacity), requested_capacity}; + } + + static void Deallocate(A& allocator, Pointer pointer, SizeType capacity) + { + AllocatorTraits::deallocate(allocator, pointer, capacity); + } + }; + + template + void ConstructElements(NoTypeDeduction& allocator, Pointer construct_first, ValueAdapter& values, SizeType construct_size) + { + for (SizeType i = 0; i < construct_size; ++i) + { + ABSL_INTERNAL_TRY + { + values.ConstructNext(allocator, construct_first + i); + } + ABSL_INTERNAL_CATCH_ANY + { + DestroyAdapter::DestroyElements(allocator, construct_first, i); + ABSL_INTERNAL_RETHROW; + } + } + } + + template + void AssignElements(Pointer assign_first, ValueAdapter& values, SizeType assign_size) + { + for (SizeType i = 0; i < assign_size; ++i) + { + values.AssignNext(assign_first + i); + } + } + + template + struct StorageView + { + Pointer data; + SizeType size; + SizeType capacity; + }; + + template + class IteratorValueAdapter + { + public: + explicit IteratorValueAdapter(const Iterator& it) : + it_(it) + { + } + + void 
ConstructNext(A& allocator, Pointer construct_at) + { + AllocatorTraits::construct(allocator, construct_at, *it_); + ++it_; + } + + void AssignNext(Pointer assign_at) + { + *assign_at = *it_; + ++it_; + } + + private: + Iterator it_; + }; + + template + class CopyValueAdapter + { + public: + explicit CopyValueAdapter(ConstPointer p) : + ptr_(p) + { + } + + void ConstructNext(A& allocator, Pointer construct_at) + { + AllocatorTraits::construct(allocator, construct_at, *ptr_); + } + + void AssignNext(Pointer assign_at) + { + *assign_at = *ptr_; + } + + private: + ConstPointer ptr_; + }; + + template + class DefaultValueAdapter + { + public: + explicit DefaultValueAdapter() + { + } + + void ConstructNext(A& allocator, Pointer construct_at) + { + AllocatorTraits::construct(allocator, construct_at); + } + + void AssignNext(Pointer assign_at) + { + *assign_at = ValueType(); + } + }; + + template + class AllocationTransaction + { + public: + explicit AllocationTransaction(A& allocator) : + allocator_data_(allocator, nullptr), + capacity_(0) + { + } + + ~AllocationTransaction() + { + if (DidAllocate()) + { + MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); + } + } + + AllocationTransaction(const AllocationTransaction&) = delete; + void operator=(const AllocationTransaction&) = delete; + + A& GetAllocator() + { + return allocator_data_.template get<0>(); + } + Pointer& GetData() + { + return allocator_data_.template get<1>(); + } + SizeType& GetCapacity() + { + return capacity_; + } + + bool DidAllocate() + { + return GetData() != nullptr; + } + + Pointer Allocate(SizeType requested_capacity) + { + Allocation result = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + GetData() = result.data; + GetCapacity() = result.capacity; + return result.data; + } + + ABSL_MUST_USE_RESULT Allocation Release() && + { + Allocation result = {GetData(), GetCapacity()}; + Reset(); + return result; + } + + private: + void Reset() + { + GetData() = nullptr; + 
GetCapacity() = 0; + } + + container_internal::CompressedTuple> allocator_data_; + SizeType capacity_; + }; + + template + class ConstructionTransaction + { + public: + explicit ConstructionTransaction(A& allocator) : + allocator_data_(allocator, nullptr), + size_(0) + { + } + + ~ConstructionTransaction() + { + if (DidConstruct()) + { + DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); + } + } + + ConstructionTransaction(const ConstructionTransaction&) = delete; + void operator=(const ConstructionTransaction&) = delete; + + A& GetAllocator() + { + return allocator_data_.template get<0>(); + } + Pointer& GetData() + { + return allocator_data_.template get<1>(); + } + SizeType& GetSize() + { + return size_; + } + + bool DidConstruct() + { + return GetData() != nullptr; + } + template + void Construct(Pointer data, ValueAdapter& values, SizeType size) + { + ConstructElements(GetAllocator(), data, values, size); + GetData() = data; + GetSize() = size; + } + void Commit() && + { + GetData() = nullptr; + GetSize() = 0; + } + + private: + container_internal::CompressedTuple> allocator_data_; + SizeType size_; + }; + + template + class Storage + { + public: + struct MemcpyPolicy + { + }; + struct ElementwiseAssignPolicy + { + }; + struct ElementwiseSwapPolicy + { + }; + struct ElementwiseConstructPolicy + { + }; + + using MoveAssignmentPolicy = absl::conditional_t< + // Fast path: if the value type can be trivially move assigned and + // destroyed, and we know the allocator doesn't do anything fancy, then + // it's safe for us to simply adopt the contents of the storage for + // `other` and remove its own reference to them. It's as if we had + // individually move-assigned each value and then destroyed the original. + absl::conjunction>, absl::is_trivially_destructible>, std::is_same>>>::value, + MemcpyPolicy, + // Otherwise we use move assignment if possible. If not, we simulate + // move assignment using move construction. 
+ // + // Note that this is in contrast to e.g. std::vector and std::optional, + // which are themselves not move-assignable when their contained type is + // not. + absl::conditional_t::value, ElementwiseAssignPolicy, ElementwiseConstructPolicy>>; + + // The policy to be used specifically when swapping inlined elements. + using SwapInlinedElementsPolicy = absl::conditional_t< + // Fast path: if the value type can be trivially move constructed/assigned + // and destroyed, and we know the allocator doesn't do anything fancy, + // then it's safe for us to simply swap the bytes in the inline storage. + // It's as if we had move-constructed a temporary vector, move-assigned + // one to the other, then move-assigned the first from the temporary. + absl::conjunction>, absl::is_trivially_move_assignable>, absl::is_trivially_destructible>, std::is_same>>>::value, + MemcpyPolicy, + absl::conditional_t::value, ElementwiseSwapPolicy, ElementwiseConstructPolicy>>; + + static SizeType NextCapacity(SizeType current_capacity) + { + return current_capacity * 2; + } + + static SizeType ComputeCapacity(SizeType current_capacity, SizeType requested_capacity) + { + return (std::max)(NextCapacity(current_capacity), requested_capacity); + } + + // --------------------------------------------------------------------------- + // Storage Constructors and Destructor + // --------------------------------------------------------------------------- + + Storage() : + metadata_(A(), /* size and is_allocated */ 0u) + { + } + + explicit Storage(const A& allocator) : + metadata_(allocator, /* size and is_allocated */ 0u) + { + } + + ~Storage() + { + // Fast path: if we are empty and not allocated, there's nothing to do. + if (GetSizeAndIsAllocated() == 0) + { + return; + } + + // Fast path: if no destructors need to be run and we know the allocator + // doesn't do anything fancy, then all we need to do is deallocate (and + // maybe not even that). 
+ if (absl::is_trivially_destructible>::value && + std::is_same>>::value) + { + DeallocateIfAllocated(); + return; + } + + DestroyContents(); + } + + // --------------------------------------------------------------------------- + // Storage Member Accessors + // --------------------------------------------------------------------------- + + SizeType& GetSizeAndIsAllocated() + { + return metadata_.template get<1>(); + } + + const SizeType& GetSizeAndIsAllocated() const + { + return metadata_.template get<1>(); + } + + SizeType GetSize() const + { + return GetSizeAndIsAllocated() >> 1; + } + + bool GetIsAllocated() const + { + return GetSizeAndIsAllocated() & 1; + } + + Pointer GetAllocatedData() + { + return data_.allocated.allocated_data; + } + + ConstPointer GetAllocatedData() const + { + return data_.allocated.allocated_data; + } + + // ABSL_ATTRIBUTE_NO_SANITIZE_CFI is used because the memory pointed to may be + // uninitialized, a common pattern in allocate()+construct() APIs. + // https://clang.llvm.org/docs/ControlFlowIntegrity.html#bad-cast-checking + // NOTE: When this was written, LLVM documentation did not explicitly + // mention that casting `char*` and using `reinterpret_cast` qualifies + // as a bad cast. + ABSL_ATTRIBUTE_NO_SANITIZE_CFI Pointer GetInlinedData() + { + return reinterpret_cast>(data_.inlined.inlined_data); + } + + ABSL_ATTRIBUTE_NO_SANITIZE_CFI ConstPointer GetInlinedData() const + { + return reinterpret_cast>(data_.inlined.inlined_data); + } + + SizeType GetAllocatedCapacity() const + { + return data_.allocated.allocated_capacity; + } + + SizeType GetInlinedCapacity() const + { + return static_cast>(kOptimalInlinedSize); + } + + StorageView MakeStorageView() + { + return GetIsAllocated() ? 
StorageView{GetAllocatedData(), GetSize(), GetAllocatedCapacity()} : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()}; + } + + A& GetAllocator() + { + return metadata_.template get<0>(); + } + + const A& GetAllocator() const + { + return metadata_.template get<0>(); + } + + // --------------------------------------------------------------------------- + // Storage Member Mutators + // --------------------------------------------------------------------------- + + ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); + + template + void Initialize(ValueAdapter values, SizeType new_size); + + template + void Assign(ValueAdapter values, SizeType new_size); + + template + void Resize(ValueAdapter values, SizeType new_size); + + template + Iterator Insert(ConstIterator pos, ValueAdapter values, SizeType insert_count); + + template + Reference EmplaceBack(Args&&... args); + + Iterator Erase(ConstIterator from, ConstIterator to); + + void Reserve(SizeType requested_capacity); + + void ShrinkToFit(); + + void Swap(Storage* other_storage_ptr); + + void SetIsAllocated() + { + GetSizeAndIsAllocated() |= static_cast>(1); + } + + void UnsetIsAllocated() + { + GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); + } + + void SetSize(SizeType size) + { + GetSizeAndIsAllocated() = + (size << 1) | static_cast>(GetIsAllocated()); + } + + void SetAllocatedSize(SizeType size) + { + GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); + } + + void SetInlinedSize(SizeType size) + { + GetSizeAndIsAllocated() = size << static_cast>(1); + } + + void AddSize(SizeType count) + { + GetSizeAndIsAllocated() += count << static_cast>(1); + } + + void SubtractSize(SizeType count) + { + ABSL_HARDENING_ASSERT(count <= GetSize()); + + GetSizeAndIsAllocated() -= count << static_cast>(1); + } + + void SetAllocation(Allocation allocation) + { + data_.allocated.allocated_data = allocation.data; + data_.allocated.allocated_capacity = allocation.capacity; + } + + void 
MemcpyFrom(const Storage& other_storage) + { + // Assumption check: it doesn't make sense to memcpy inlined elements unless + // we know the allocator doesn't do anything fancy, and one of the following + // holds: + // + // * The elements are trivially relocatable. + // + // * It's possible to trivially assign the elements and then destroy the + // source. + // + // * It's possible to trivially copy construct/assign the elements. + // + { + using V = ValueType; + ABSL_HARDENING_ASSERT( + other_storage.GetIsAllocated() || + (std::is_same>::value && + ( + // First case above + absl::is_trivially_relocatable::value || + // Second case above + (absl::is_trivially_move_assignable::value && + absl::is_trivially_destructible::value) || + // Third case above + (absl::is_trivially_copy_constructible::value || + absl::is_trivially_copy_assignable::value) + )) + ); + } + + GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); + data_ = other_storage.data_; + } + + void DeallocateIfAllocated() + { + if (GetIsAllocated()) + { + MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), GetAllocatedCapacity()); + } + } + + private: + ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); + + using Metadata = container_internal::CompressedTuple>; + + struct Allocated + { + Pointer allocated_data; + SizeType allocated_capacity; + }; + + // `kOptimalInlinedSize` is an automatically adjusted inlined capacity of the + // `InlinedVector`. Sometimes, it is possible to increase the capacity (from + // the user requested `N`) without increasing the size of the `InlinedVector`. 
+ static constexpr size_t kOptimalInlinedSize = + (std::max)(N, sizeof(Allocated) / sizeof(ValueType)); + + struct Inlined + { + alignas(ValueType) char inlined_data[sizeof( + ValueType[kOptimalInlinedSize] + )]; + }; + + union Data + { + Allocated allocated; + Inlined inlined; + }; + + void SwapN(ElementwiseSwapPolicy, Storage* other, SizeType n); + void SwapN(ElementwiseConstructPolicy, Storage* other, SizeType n); + + void SwapInlinedElements(MemcpyPolicy, Storage* other); + template + void SwapInlinedElements(NotMemcpyPolicy, Storage* other); + + template + ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); + + Metadata metadata_; + Data data_; + }; + + template + void Storage::DestroyContents() + { + Pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); + DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); + DeallocateIfAllocated(); + } + + template + void Storage::InitFrom(const Storage& other) + { + const SizeType n = other.GetSize(); + ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. + ConstPointer src; + Pointer dst; + if (!other.GetIsAllocated()) + { + dst = GetInlinedData(); + src = other.GetInlinedData(); + } + else + { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + SetAllocation(allocation); + dst = allocation.data; + src = other.GetAllocatedData(); + } + + // Fast path: if the value type is trivially copy constructible and we know + // the allocator doesn't do anything fancy, then we know it is legal for us to + // simply memcpy the other vector's elements. 
+ if (absl::is_trivially_copy_constructible>::value && + std::is_same>>::value) + { + std::memcpy(reinterpret_cast(dst), reinterpret_cast(src), n * sizeof(ValueType)); + } + else + { + auto values = IteratorValueAdapter>(src); + ConstructElements(GetAllocator(), dst, values, n); + } + + GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); + } + + template + template + auto Storage::Initialize(ValueAdapter values, SizeType new_size) + -> void + { + // Only callable from constructors! + ABSL_HARDENING_ASSERT(!GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetSize() == 0); + + Pointer construct_data; + if (new_size > GetInlinedCapacity()) + { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = + ComputeCapacity(GetInlinedCapacity(), new_size); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + construct_data = allocation.data; + SetAllocation(allocation); + SetIsAllocated(); + } + else + { + construct_data = GetInlinedData(); + } + + ConstructElements(GetAllocator(), construct_data, values, new_size); + + // Since the initial size was guaranteed to be `0` and the allocated bit is + // already correct for either case, *adding* `new_size` gives us the correct + // result faster than setting it directly. 
+ AddSize(new_size); + } + + template + template + auto Storage::Assign(ValueAdapter values, SizeType new_size) + -> void + { + StorageView storage_view = MakeStorageView(); + + AllocationTransaction allocation_tx(GetAllocator()); + + absl::Span> assign_loop; + absl::Span> construct_loop; + absl::Span> destroy_loop; + + if (new_size > storage_view.capacity) + { + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; + destroy_loop = {storage_view.data, storage_view.size}; + } + else if (new_size > storage_view.size) + { + assign_loop = {storage_view.data, storage_view.size}; + construct_loop = {storage_view.data + storage_view.size, new_size - storage_view.size}; + } + else + { + assign_loop = {storage_view.data, new_size}; + destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; + } + + AssignElements(assign_loop.data(), values, assign_loop.size()); + + ConstructElements(GetAllocator(), construct_loop.data(), values, construct_loop.size()); + + DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), destroy_loop.size()); + + if (allocation_tx.DidAllocate()) + { + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + + SetSize(new_size); + } + + template + template + auto Storage::Resize(ValueAdapter values, SizeType new_size) + -> void + { + StorageView storage_view = MakeStorageView(); + Pointer const base = storage_view.data; + const SizeType size = storage_view.size; + A& alloc = GetAllocator(); + if (new_size <= size) + { + // Destroy extra old elements. + DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); + } + else if (new_size <= storage_view.capacity) + { + // Construct new elements in place. + ConstructElements(alloc, base + size, values, new_size - size); + } + else + { + // Steps: + // a. Allocate new backing store. + // b. 
Construct new elements in new backing store. + // c. Move existing elements from old backing store to new backing store. + // d. Destroy all elements in old backing store. + // Use transactional wrappers for the first two steps so we can roll + // back if necessary due to exceptions. + AllocationTransaction allocation_tx(alloc); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + ConstructionTransaction construction_tx(alloc); + construction_tx.Construct(new_data + size, values, new_size - size); + + IteratorValueAdapter> move_values( + (MoveIterator(base)) + ); + ConstructElements(alloc, new_data, move_values, size); + + DestroyAdapter::DestroyElements(alloc, base, size); + std::move(construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + SetSize(new_size); + } + + template + template + auto Storage::Insert(ConstIterator pos, ValueAdapter values, SizeType insert_count) -> Iterator + { + StorageView storage_view = MakeStorageView(); + + auto insert_index = static_cast>( + std::distance(ConstIterator(storage_view.data), pos) + ); + SizeType insert_end_index = insert_index + insert_count; + SizeType new_size = storage_view.size + insert_count; + + if (new_size > storage_view.capacity) + { + AllocationTransaction allocation_tx(GetAllocator()); + ConstructionTransaction construction_tx(GetAllocator()); + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + construction_tx.Construct(new_data + insert_index, values, insert_count); + + move_construction_tx.Construct(new_data, move_values, insert_index); + + ConstructElements(GetAllocator(), new_data + 
insert_end_index, move_values, storage_view.size - insert_index); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + std::move(construction_tx).Commit(); + std::move(move_construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + + SetAllocatedSize(new_size); + return Iterator(new_data + insert_index); + } + else + { + SizeType move_construction_destination_index = + (std::max)(insert_end_index, storage_view.size); + + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_construction_values( + MoveIterator(storage_view.data + (move_construction_destination_index - insert_count)) + ); + absl::Span> move_construction = { + storage_view.data + move_construction_destination_index, + new_size - move_construction_destination_index}; + + Pointer move_assignment_values = storage_view.data + insert_index; + absl::Span> move_assignment = { + storage_view.data + insert_end_index, + move_construction_destination_index - insert_end_index}; + + absl::Span> insert_assignment = {move_assignment_values, move_construction.size()}; + + absl::Span> insert_construction = { + insert_assignment.data() + insert_assignment.size(), + insert_count - insert_assignment.size()}; + + move_construction_tx.Construct(move_construction.data(), move_construction_values, move_construction.size()); + + for (Pointer + destination = move_assignment.data() + move_assignment.size(), + last_destination = move_assignment.data(), + source = move_assignment_values + move_assignment.size(); + ;) + { + --destination; + --source; + if (destination < last_destination) + break; + *destination = std::move(*source); + } + + AssignElements(insert_assignment.data(), values, insert_assignment.size()); + + ConstructElements(GetAllocator(), insert_construction.data(), values, insert_construction.size()); + + std::move(move_construction_tx).Commit(); + + AddSize(insert_count); + return 
Iterator(storage_view.data + insert_index); + } + } + + template + template + auto Storage::EmplaceBack(Args&&... args) -> Reference + { + StorageView storage_view = MakeStorageView(); + const SizeType n = storage_view.size; + if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) + { + // Fast path; new element fits. + Pointer last_ptr = storage_view.data + n; + AllocatorTraits::construct(GetAllocator(), last_ptr, std::forward(args)...); + AddSize(1); + return *last_ptr; + } + // TODO(b/173712035): Annotate with musttail attribute to prevent regression. + return EmplaceBackSlow(std::forward(args)...); + } + + template + template + auto Storage::EmplaceBackSlow(Args&&... args) -> Reference + { + StorageView storage_view = MakeStorageView(); + AllocationTransaction allocation_tx(GetAllocator()); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + SizeType requested_capacity = NextCapacity(storage_view.capacity); + Pointer construct_data = allocation_tx.Allocate(requested_capacity); + Pointer last_ptr = construct_data + storage_view.size; + + // Construct new element. + AllocatorTraits::construct(GetAllocator(), last_ptr, std::forward(args)...); + // Move elements from old backing store to new backing store. + ABSL_INTERNAL_TRY + { + ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY + { + AllocatorTraits::destroy(GetAllocator(), last_ptr); + ABSL_INTERNAL_RETHROW; + } + // Destroy elements in old backing store. 
+ DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + AddSize(1); + return *last_ptr; + } + + template + auto Storage::Erase(ConstIterator from, ConstIterator to) + -> Iterator + { + StorageView storage_view = MakeStorageView(); + + auto erase_size = static_cast>(std::distance(from, to)); + auto erase_index = static_cast>( + std::distance(ConstIterator(storage_view.data), from) + ); + SizeType erase_end_index = erase_index + erase_size; + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data + erase_end_index) + ); + + AssignElements(storage_view.data + erase_index, move_values, storage_view.size - erase_end_index); + + DestroyAdapter::DestroyElements( + GetAllocator(), storage_view.data + (storage_view.size - erase_size), erase_size + ); + + SubtractSize(erase_size); + return Iterator(storage_view.data + erase_index); + } + + template + auto Storage::Reserve(SizeType requested_capacity) -> void + { + StorageView storage_view = MakeStorageView(); + + if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) + return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + + SizeType new_requested_capacity = + ComputeCapacity(storage_view.capacity, requested_capacity); + Pointer new_data = allocation_tx.Allocate(new_requested_capacity); + + ConstructElements(GetAllocator(), new_data, move_values, storage_view.size); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + + template + auto Storage::ShrinkToFit() -> void + { + // May only be called on allocated instances! 
+ ABSL_HARDENING_ASSERT(GetIsAllocated()); + + StorageView storage_view{GetAllocatedData(), GetSize(), GetAllocatedCapacity()}; + + if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) + return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + + Pointer construct_data; + if (storage_view.size > GetInlinedCapacity()) + { + SizeType requested_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(requested_capacity); + if (allocation_tx.GetCapacity() >= storage_view.capacity) + { + // Already using the smallest available heap allocation. + return; + } + } + else + { + construct_data = GetInlinedData(); + } + + ABSL_INTERNAL_TRY + { + ConstructElements(GetAllocator(), construct_data, move_values, storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY + { + SetAllocation({storage_view.data, storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + MallocAdapter::Deallocate(GetAllocator(), storage_view.data, storage_view.capacity); + + if (allocation_tx.DidAllocate()) + { + SetAllocation(std::move(allocation_tx).Release()); + } + else + { + UnsetIsAllocated(); + } + } + + template + auto Storage::Swap(Storage* other_storage_ptr) -> void + { + using std::swap; + ABSL_HARDENING_ASSERT(this != other_storage_ptr); + + if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) + { + swap(data_.allocated, other_storage_ptr->data_.allocated); + } + else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) + { + SwapInlinedElements(SwapInlinedElementsPolicy{}, other_storage_ptr); + } + else + { + Storage* allocated_ptr = this; + Storage* inlined_ptr = other_storage_ptr; + if (!allocated_ptr->GetIsAllocated()) + swap(allocated_ptr, inlined_ptr); + + StorageView allocated_storage_view{ + allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), 
allocated_ptr->GetAllocatedCapacity()}; + + IteratorValueAdapter> move_values( + MoveIterator(inlined_ptr->GetInlinedData()) + ); + + ABSL_INTERNAL_TRY + { + ConstructElements(inlined_ptr->GetAllocator(), allocated_ptr->GetInlinedData(), move_values, inlined_ptr->GetSize()); + } + ABSL_INTERNAL_CATCH_ANY + { + allocated_ptr->SetAllocation(Allocation{ + allocated_storage_view.data, allocated_storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), inlined_ptr->GetInlinedData(), inlined_ptr->GetSize()); + + inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, allocated_storage_view.capacity}); + } + + swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); + swap(GetAllocator(), other_storage_ptr->GetAllocator()); + } + + template + void Storage::SwapN(ElementwiseSwapPolicy, Storage* other, SizeType n) + { + std::swap_ranges(GetInlinedData(), GetInlinedData() + n, other->GetInlinedData()); + } + + template + void Storage::SwapN(ElementwiseConstructPolicy, Storage* other, SizeType n) + { + Pointer a = GetInlinedData(); + Pointer b = other->GetInlinedData(); + // see note on allocators in `SwapInlinedElements`. + A& allocator_a = GetAllocator(); + A& allocator_b = other->GetAllocator(); + for (SizeType i = 0; i < n; ++i, ++a, ++b) + { + ValueType tmp(std::move(*a)); + + AllocatorTraits::destroy(allocator_a, a); + AllocatorTraits::construct(allocator_b, a, std::move(*b)); + + AllocatorTraits::destroy(allocator_b, b); + AllocatorTraits::construct(allocator_a, b, std::move(tmp)); + } + } + + template + void Storage::SwapInlinedElements(MemcpyPolicy, Storage* other) + { + Data tmp = data_; + data_ = other->data_; + other->data_ = tmp; + } + + template + template + void Storage::SwapInlinedElements(NotMemcpyPolicy policy, Storage* other) + { + // Note: `destroy` needs to use pre-swap allocator while `construct` - + // post-swap allocator. 
Allocators will be swapped later on outside of + // `SwapInlinedElements`. + Storage* small_ptr = this; + Storage* large_ptr = other; + if (small_ptr->GetSize() > large_ptr->GetSize()) + { + std::swap(small_ptr, large_ptr); + } + + auto small_size = small_ptr->GetSize(); + auto diff = large_ptr->GetSize() - small_size; + SwapN(policy, other, small_size); + + IteratorValueAdapter> move_values( + MoveIterator(large_ptr->GetInlinedData() + small_size) + ); + + ConstructElements(large_ptr->GetAllocator(), small_ptr->GetInlinedData() + small_size, move_values, diff); + + DestroyAdapter::DestroyElements(large_ptr->GetAllocator(), large_ptr->GetInlinedData() + small_size, diff); + } + +// End ignore "array-bounds" +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + + } // namespace inlined_vector_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/layout.h b/CAPI/cpp/grpc/include/absl/container/internal/layout.h new file mode 100644 index 00000000..c37e8b59 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/layout.h @@ -0,0 +1,809 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// MOTIVATION AND TUTORIAL +// +// If you want to put in a single heap allocation N doubles followed by M ints, +// it's easy if N and M are known at compile time. 
+// +// struct S { +// double a[N]; +// int b[M]; +// }; +// +// S* p = new S; +// +// But what if N and M are known only in run time? Class template Layout to the +// rescue! It's a portable generalization of the technique known as struct hack. +// +// // This object will tell us everything we need to know about the memory +// // layout of double[N] followed by int[M]. It's structurally identical to +// // size_t[2] that stores N and M. It's very cheap to create. +// const Layout layout(N, M); +// +// // Allocate enough memory for both arrays. `AllocSize()` tells us how much +// // memory is needed. We are free to use any allocation function we want as +// // long as it returns aligned memory. +// std::unique_ptr p(new unsigned char[layout.AllocSize()]); +// +// // Obtain the pointer to the array of doubles. +// // Equivalent to `reinterpret_cast(p.get())`. +// // +// // We could have written layout.Pointer<0>(p) instead. If all the types are +// // unique you can use either form, but if some types are repeated you must +// // use the index form. +// double* a = layout.Pointer(p.get()); +// +// // Obtain the pointer to the array of ints. +// // Equivalent to `reinterpret_cast(p.get() + N * 8)`. +// int* b = layout.Pointer(p); +// +// If we are unable to specify sizes of all fields, we can pass as many sizes as +// we can to `Partial()`. In return, it'll allow us to access the fields whose +// locations and sizes can be computed from the provided information. +// `Partial()` comes in handy when the array sizes are embedded into the +// allocation. +// +// // size_t[1] containing N, size_t[1] containing M, double[N], int[M]. +// using L = Layout; +// +// unsigned char* Allocate(size_t n, size_t m) { +// const L layout(1, 1, n, m); +// unsigned char* p = new unsigned char[layout.AllocSize()]; +// *layout.Pointer<0>(p) = n; +// *layout.Pointer<1>(p) = m; +// return p; +// } +// +// void Use(unsigned char* p) { +// // First, extract N and M. 
+// // Specify that the first array has only one element. Using `prefix` we +// // can access the first two arrays but not more. +// constexpr auto prefix = L::Partial(1); +// size_t n = *prefix.Pointer<0>(p); +// size_t m = *prefix.Pointer<1>(p); +// +// // Now we can get pointers to the payload. +// const L layout(1, 1, n, m); +// double* a = layout.Pointer(p); +// int* b = layout.Pointer(p); +// } +// +// The layout we used above combines fixed-size with dynamically-sized fields. +// This is quite common. Layout is optimized for this use case and generates +// optimal code. All computations that can be performed at compile time are +// indeed performed at compile time. +// +// Efficiency tip: The order of fields matters. In `Layout` try to +// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no +// padding in between arrays. +// +// You can manually override the alignment of an array by wrapping the type in +// `Aligned`. `Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). `N` cannot be less than `alignof(T)`. +// +// `AllocSize()` and `Pointer()` are the most basic methods for dealing with +// memory layouts. Check out the reference or code below to discover more. +// +// EXAMPLE +// +// // Immutable move-only string with sizeof equal to sizeof(void*). The +// // string size and the characters are kept in the same heap allocation. +// class CompactString { +// public: +// CompactString(const char* s = "") { +// const size_t size = strlen(s); +// // size_t[1] followed by char[size + 1]. +// const L layout(1, size + 1); +// p_.reset(new unsigned char[layout.AllocSize()]); +// // If running under ASAN, mark the padding bytes, if any, to catch +// // memory errors. +// layout.PoisonPadding(p_.get()); +// // Store the size in the allocation. 
+// *layout.Pointer(p_.get()) = size; +// // Store the characters in the allocation. +// memcpy(layout.Pointer(p_.get()), s, size + 1); +// } +// +// size_t size() const { +// // Equivalent to reinterpret_cast(*p). +// return *L::Partial().Pointer(p_.get()); +// } +// +// const char* c_str() const { +// // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). +// // The argument in Partial(1) specifies that we have size_t[1] in front +// // of the characters. +// return L::Partial(1).Pointer(p_.get()); +// } +// +// private: +// // Our heap allocation contains a size_t followed by an array of chars. +// using L = Layout; +// std::unique_ptr p_; +// }; +// +// int main() { +// CompactString s = "hello"; +// assert(s.size() == 5); +// assert(strcmp(s.c_str(), "hello") == 0); +// } +// +// DOCUMENTATION +// +// The interface exported by this file consists of: +// - class `Layout<>` and its public members. +// - The public members of class `internal_layout::LayoutImpl<>`. That class +// isn't intended to be used directly, and its name and template parameter +// list are internal implementation details, but the class itself provides +// most of the functionality in this file. See comments on its members for +// detailed documentation. +// +// `Layout::Partial(count1,..., countm)` (where `m` <= `n`) returns a +// `LayoutImpl<>` object. `Layout layout(count1,..., countn)` +// creates a `Layout` object, which exposes the same functionality by inheriting +// from `LayoutImpl<>`. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_ +#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#include +#endif + +#if defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE +#endif + +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#include +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // A type wrapper that instructs `Layout` to use the specific alignment for the + // array. `Layout<..., Aligned, ...>` has exactly the same API + // and behavior as `Layout<..., T, ...>` except that the first element of the + // array of `T` is aligned to `N` (the rest of the elements follow without + // padding). + // + // Requires: `N >= alignof(T)` and `N` is a power of 2. + template + struct Aligned; + + namespace internal_layout + { + + template + struct NotAligned + { + }; + + template + struct NotAligned> + { + static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); + }; + + template + using IntToSize = size_t; + + template + using TypeToSize = size_t; + + template + struct Type : NotAligned + { + using type = T; + }; + + template + struct Type> + { + using type = T; + }; + + template + struct SizeOf : NotAligned, std::integral_constant + { + }; + + template + struct SizeOf> : std::integral_constant + { + }; + + // Note: workaround for https://gcc.gnu.org/PR88115 + template + struct AlignOf : NotAligned + { + static constexpr size_t value = alignof(T); + }; + + template + struct AlignOf> + { + static_assert(N % alignof(T) == 0, "Custom alignment can't be lower than the type's alignment"); + static constexpr size_t value = N; + }; + + // Does `Ts...` contain `T`? 
+ template + using Contains = absl::disjunction...>; + + template + using CopyConst = + typename std::conditional::value, const To, To>::type; + + // Note: We're not qualifying this with absl:: because it doesn't compile under + // MSVC. + template + using SliceType = Span; + + // This namespace contains no types. It prevents functions defined in it from + // being found by ADL. + namespace adl_barrier + { + + template + constexpr size_t Find(Needle, Needle, Ts...) + { + static_assert(!Contains(), "Duplicate element type"); + return 0; + } + + template + constexpr size_t Find(Needle, T, Ts...) + { + return adl_barrier::Find(Needle(), Ts()...) + 1; + } + + constexpr bool IsPow2(size_t n) + { + return !(n & (n - 1)); + } + + // Returns `q * m` for the smallest `q` such that `q * m >= n`. + // Requires: `m` is a power of two. It's enforced by IsLegalElementType below. + constexpr size_t Align(size_t n, size_t m) + { + return (n + m - 1) & ~(m - 1); + } + + constexpr size_t Min(size_t a, size_t b) + { + return b < a ? b : a; + } + + constexpr size_t Max(size_t a) + { + return a; + } + + template + constexpr size_t Max(size_t a, size_t b, Ts... rest) + { + return adl_barrier::Max(b < a ? a : b, rest...); + } + + template + std::string TypeName() + { + std::string out; + int status = 0; + char* demangled = nullptr; +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE + demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); +#endif + if (status == 0 && demangled != nullptr) + { // Demangling succeeded. + absl::StrAppend(&out, "<", demangled, ">"); + free(demangled); + } + else + { +#if defined(__GXX_RTTI) || defined(_CPPRTTI) + absl::StrAppend(&out, "<", typeid(T).name(), ">"); +#endif + } + return out; + } + + } // namespace adl_barrier + + template + using EnableIf = typename std::enable_if::type; + + // Can `T` be a template argument of `Layout`? 
+ template + using IsLegalElementType = std::integral_constant< + bool, + !std::is_reference::value && !std::is_volatile::value && + !std::is_reference::type>::value && + !std::is_volatile::type>::value && + adl_barrier::IsPow2(AlignOf::value)>; + + template + class LayoutImpl; + + // Public base class of `Layout` and the result type of `Layout::Partial()`. + // + // `Elements...` contains all template arguments of `Layout` that created this + // instance. + // + // `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments + // passed to `Layout::Partial()` or `Layout::Layout()`. + // + // `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is + // `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we + // can compute offsets). + template + class LayoutImpl, absl::index_sequence, absl::index_sequence> + { + private: + static_assert(sizeof...(Elements) > 0, "At least one field is required"); + static_assert(absl::conjunction...>::value, "Invalid element type (see IsLegalElementType)"); + + enum + { + NumTypes = sizeof...(Elements), + NumSizes = sizeof...(SizeSeq), + NumOffsets = sizeof...(OffsetSeq), + }; + + // These are guaranteed by `Layout`. + static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), "Internal error"); + static_assert(NumTypes > 0, "Internal error"); + + // Returns the index of `T` in `Elements...`. Results in a compilation error + // if `Elements...` doesn't contain exactly one instance of `T`. + template + static constexpr size_t ElementIndex() + { + static_assert(Contains, Type::type>...>(), "Type not found"); + return adl_barrier::Find(Type(), Type::type>()...); + } + + template + using ElementAlignment = + AlignOf>::type>; + + public: + // Element types of all arrays packed in a tuple. + using ElementTypes = std::tuple::type...>; + + // Element type of the Nth array. + template + using ElementType = typename std::tuple_element::type; + + constexpr explicit LayoutImpl(IntToSize... 
sizes) : + size_{sizes...} + { + } + + // Alignment of the layout, equal to the strictest alignment of all elements. + // All pointers passed to the methods of layout must be aligned to this value. + static constexpr size_t Alignment() + { + return adl_barrier::Max(AlignOf::value...); + } + + // Offset in bytes of the Nth array. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset<0>() == 0); // The ints starts from 0. + // assert(x.Offset<1>() == 16); // The doubles starts from 16. + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + template = 0> + constexpr size_t Offset() const + { + return 0; + } + + template = 0> + constexpr size_t Offset() const + { + static_assert(N < NumOffsets, "Index out of bounds"); + return adl_barrier::Align( + Offset() + SizeOf>::value * size_[N - 1], + ElementAlignment::value + ); + } + + // Offset in bytes of the array with the specified element type. There must + // be exactly one such array and its zero-based index must be at most + // `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset() == 0); // The ints starts from 0. + // assert(x.Offset() == 16); // The doubles starts from 16. + template + constexpr size_t Offset() const + { + return Offset()>(); + } + + // Offsets in bytes of all arrays for which the offsets are known. + constexpr std::array Offsets() const + { + return {{Offset()...}}; + } + + // The number of elements in the Nth array. This is the Nth argument of + // `Layout::Partial()` or `Layout::Layout()` (zero-based). + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size<0>() == 3); + // assert(x.Size<1>() == 4); + // + // Requires: `N < NumSizes`. + template + constexpr size_t Size() const + { + static_assert(N < NumSizes, "Index out of bounds"); + return size_[N]; + } + + // The number of elements in the array with the specified element type. 
+ // There must be exactly one such array and its zero-based index must be + // at most `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size() == 3); + // assert(x.Size() == 4); + template + constexpr size_t Size() const + { + return Size()>(); + } + + // The number of elements of all arrays for which they are known. + constexpr std::array Sizes() const + { + return {{Size()...}}; + } + + // Pointer to the beginning of the Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer<0>(p); + // double* doubles = x.Pointer<1>(p); + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst>* Pointer(Char* p) const + { + using C = typename std::remove_const::type; + static_assert( + std::is_same() || std::is_same() || + std::is_same(), + "The argument must be a pointer to [const] [signed|unsigned] char" + ); + constexpr size_t alignment = Alignment(); + (void)alignment; + assert(reinterpret_cast(p) % alignment == 0); + return reinterpret_cast>*>(p + Offset()); + } + + // Pointer to the beginning of the array with the specified element type. + // There must be exactly one such array and its zero-based index must be at + // most `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer(p); + // double* doubles = x.Pointer(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst* Pointer(Char* p) const + { + return Pointer()>(p); + } + + // Pointers to all arrays for which pointers are known. + // + // `Char` must be `[const] [signed|unsigned] char`. 
+ // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // int* ints; + // double* doubles; + // std::tie(ints, doubles) = x.Pointers(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>*...> + Pointers(Char* p) const + { + return std::tuple>*...>( + Pointer(p)... + ); + } + + // The Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice<0>(p); + // Span doubles = x.Slice<1>(p); + // + // Requires: `N < NumSizes`. + // Requires: `p` is aligned to `Alignment()`. + template + SliceType>> Slice(Char* p) const + { + return SliceType>>(Pointer(p), Size()); + } + + // The array with the specified element type. There must be exactly one + // such array and its zero-based index must be less than `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice(p); + // Span doubles = x.Slice(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + SliceType> Slice(Char* p) const + { + return Slice()>(p); + } + + // All arrays with known sizes. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // Span ints; + // Span doubles; + // std::tie(ints, doubles) = x.Slices(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. 
+ template + std::tuple::type>>...> + Slices(Char* p) const + { + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed + // in 6.1). + (void)p; + return std::tuple>>...>( + Slice(p)... + ); + } + + // The size of the allocation that fits all arrays. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes + // + // Requires: `NumSizes == sizeof...(Ts)`. + constexpr size_t AllocSize() const + { + static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); + return Offset() + + SizeOf>::value * size_[NumTypes - 1]; + } + + // If built with --config=asan, poisons padding bytes (if any) in the + // allocation. The pointer must point to a memory block at least + // `AllocSize()` bytes in length. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // Requires: `p` is aligned to `Alignment()`. + template = 0> + void PoisonPadding(const Char* p) const + { + Pointer<0>(p); // verify the requirements on `Char` and `p` + } + + template = 0> + void PoisonPadding(const Char* p) const + { + static_assert(N < NumOffsets, "Index out of bounds"); + (void)p; +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + PoisonPadding(p); + // The `if` is an optimization. It doesn't affect the observable behaviour. + if (ElementAlignment::value % ElementAlignment::value) + { + size_t start = + Offset() + SizeOf>::value * size_[N - 1]; + ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); + } +#endif + } + + // Human-readable description of the memory layout. Useful for debugging. + // Slow. + // + // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed + // // by an unknown number of doubles. + // auto x = Layout::Partial(5, 3); + // assert(x.DebugString() == + // "@0(1)[5]; @8(4)[3]; @24(8)"); + // + // Each field is in the following format: @offset(sizeof)[size] ( + // may be missing depending on the target platform). 
For example, + // @8(4)[3] means that at offset 8 we have an array of ints, where each + // int is 4 bytes, and we have 3 of those ints. The size of the last field may + // be missing (as in the example above). Only fields with known offsets are + // described. Type names may differ across platforms: one compiler might + // produce "unsigned*" where another produces "unsigned int *". + std::string DebugString() const + { + const auto offsets = Offsets(); + const size_t sizes[] = {SizeOf>::value...}; + const std::string types[] = { + adl_barrier::TypeName>()...}; + std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); + for (size_t i = 0; i != NumOffsets - 1; ++i) + { + absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], "(", sizes[i + 1], ")"); + } + // NumSizes is a constant that may be zero. Some compilers cannot see that + // inside the if statement "size_[NumSizes - 1]" must be valid. + int last = static_cast(NumSizes) - 1; + if (NumTypes == NumSizes && last >= 0) + { + absl::StrAppend(&res, "[", size_[last], "]"); + } + return res; + } + + private: + // Arguments of `Layout::Partial()` or `Layout::Layout()`. + size_t size_[NumSizes > 0 ? NumSizes : 1]; + }; + + template + using LayoutType = LayoutImpl< + std::tuple, + absl::make_index_sequence, + absl::make_index_sequence>; + + } // namespace internal_layout + + // Descriptor of arrays of various types and sizes laid out in memory one after + // another. See the top of the file for documentation. + // + // Check out the public API of internal_layout::LayoutImpl above. The type is + // internal to the library but its methods are public, and they are inherited + // by `Layout`. 
+ template + class Layout : public internal_layout::LayoutType + { + public: + static_assert(sizeof...(Ts) > 0, "At least one field is required"); + static_assert( + absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)" + ); + + // The result type of `Partial()` with `NumSizes` arguments. + template + using PartialType = internal_layout::LayoutType; + + // `Layout` knows the element types of the arrays we want to lay out in + // memory but not the number of elements in each array. + // `Partial(size1, ..., sizeN)` allows us to specify the latter. The + // resulting immutable object can be used to obtain pointers to the + // individual arrays. + // + // It's allowed to pass fewer array sizes than the number of arrays. E.g., + // if all you need is to the offset of the second array, you only need to + // pass one argument -- the number of elements in the first array. + // + // // int[3] followed by 4 bytes of padding and an unknown number of + // // doubles. + // auto x = Layout::Partial(3); + // // doubles start at byte 16. + // assert(x.Offset<1>() == 16); + // + // If you know the number of elements in all arrays, you can still call + // `Partial()` but it's more convenient to use the constructor of `Layout`. + // + // Layout x(3, 5); + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + // + // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. + // Requires: all arguments are convertible to `size_t`. + template + static constexpr PartialType Partial(Sizes&&... sizes) + { + static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); + return PartialType(absl::forward(sizes)...); + } + + // Creates a layout with the sizes of all arrays specified. If you know + // only the sizes of the first N arrays (where N can be zero), you can use + // `Partial()` defined above. 
The constructor is essentially equivalent to + // calling `Partial()` and passing in all array sizes; the constructor is + // provided as a convenient abbreviation. + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + constexpr explicit Layout(internal_layout::TypeToSize... sizes) : + internal_layout::LayoutType(sizes...) + { + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h b/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h new file mode 100644 index 00000000..4ab459fb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h @@ -0,0 +1,105 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Adapts a policy for nodes. +// +// The node policy should model: +// +// struct Policy { +// // Returns a new node allocated and constructed using the allocator, using +// // the specified arguments. +// template +// value_type* new_element(Alloc* alloc, Args&&... args) const; +// +// // Destroys and deallocates node using the allocator. +// template +// void delete_element(Alloc* alloc, value_type* node) const; +// }; +// +// It may also optionally define `value()` and `apply()`. For documentation on +// these, see hash_policy_traits.h. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ +#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + struct node_slot_policy + { + static_assert(std::is_lvalue_reference::value, ""); + + using slot_type = typename std::remove_cv< + typename std::remove_reference::type>::type*; + + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) + { + *slot = Policy::new_element(alloc, std::forward(args)...); + } + + template + static void destroy(Alloc* alloc, slot_type* slot) + { + Policy::delete_element(alloc, *slot); + } + + template + static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) + { + *new_slot = *old_slot; + } + + static size_t space_used(const slot_type* slot) + { + if (slot == nullptr) + return Policy::element_space_used(nullptr); + return Policy::element_space_used(*slot); + } + + static Reference element(slot_type* slot) + { + return **slot; + } + + template + static auto value(T* elem) -> decltype(P::value(elem)) + { + return P::value(elem); + } + + template + static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) + { + return P::apply(std::forward(ts)...); + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h new file mode 100644 index 00000000..f6d9342b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h @@ -0,0 +1,229 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ + +#include +#include +#include + +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class raw_hash_map : public raw_hash_set + { + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. + template + using MappedReference = decltype(P::value( + std::addressof(std::declval()) + )); + + // MappedConstReference<> may be a non-reference type. + template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()) + )); + + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + + // TODO(b/187807849): Evaluate whether to support reference mapped_type and + // remove this assertion if/when it is supported. 
+ static_assert(!std::is_reference::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() + { + } + using raw_hash_map::raw_hash_set::raw_hash_set; + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert_or_assign(k, v).first; + } + + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as 
`std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + template::value, int>::type = 0, K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference

at(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto it = this->find(key); + if (it == this->end()) + { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at" + ); + } + return Policy::value(&*it); + } + + template + MappedConstReference

at(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto it = this->find(key); + if (it == this->end()) + { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at" + ); + } + return Policy::value(&*it); + } + + template + MappedReference

operator[](key_arg&& key) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return Policy::value(&*try_emplace(std::forward(key)).first); + } + + template + MappedReference

operator[](const key_arg& key) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return Policy::value(&*try_emplace(key).first); + } + + private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward(k), std::forward(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward(v); + return {this->iterator_at(res.first), res.second}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(res.first), res.second}; + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h new file mode 100644 index 00000000..d8b2e78c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h @@ -0,0 +1,3431 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// An open-addressing +// hashtable with quadratic probing. 
+// +// This is a low level hashtable on top of which different interfaces can be +// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. +// +// The table interface is similar to that of std::unordered_set. Notable +// differences are that most member functions support heterogeneous keys when +// BOTH the hash and eq functions are marked as transparent. They do so by +// providing a typedef called `is_transparent`. +// +// When heterogeneous lookup is enabled, functions that take key_type act as if +// they have an overload set like: +// +// iterator find(const key_type& key); +// template +// iterator find(const K& key); +// +// size_type erase(const key_type& key); +// template +// size_type erase(const K& key); +// +// std::pair equal_range(const key_type& key); +// template +// std::pair equal_range(const K& key); +// +// When heterogeneous lookup is disabled, only the explicit `key_type` overloads +// exist. +// +// find() also supports passing the hash explicitly: +// +// iterator find(const key_type& key, size_t hash); +// template +// iterator find(const U& key, size_t hash); +// +// In addition the pointer to element and iterator stability guarantees are +// weaker: all iterators and pointers are invalidated after a new element is +// inserted. +// +// IMPLEMENTATION DETAILS +// +// # Table Layout +// +// A raw_hash_set's backing array consists of control bytes followed by slots +// that may or may not contain objects. +// +// The layout of the backing array, for `capacity` slots, is thus, as a +// pseudo-struct: +// +// struct BackingArray { +// // The number of elements we can insert before growing the capacity. +// size_t growth_left; +// // Control bytes for the "real" slots. +// ctrl_t ctrl[capacity]; +// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to +// // stop and serves no other purpose. +// ctrl_t sentinel; +// // A copy of the first `kWidth - 1` elements of `ctrl`. 
This is used so +// // that if a probe sequence picks a value near the end of `ctrl`, +// // `Group` will have valid control bytes to look at. +// ctrl_t clones[kWidth - 1]; +// // The actual slot data. +// slot_type slots[capacity]; +// }; +// +// The length of this array is computed by `AllocSize()` below. +// +// Control bytes (`ctrl_t`) are bytes (collected into groups of a +// platform-specific size) that define the state of the corresponding slot in +// the slot array. Group manipulation is tightly optimized to be as efficient +// as possible: SSE and friends on x86, clever bit operations on other arches. +// +// Group 1 Group 2 Group 3 +// +---------------+---------------+---------------+ +// | | | | | | | | | | | | | | | | | | | | | | | | | +// +---------------+---------------+---------------+ +// +// Each control byte is either a special value for empty slots, deleted slots +// (sometimes called *tombstones*), and a special end-of-table marker used by +// iterators, or, if occupied, seven bits (H2) from the hash of the value in the +// corresponding slot. +// +// Storing control bytes in a separate array also has beneficial cache effects, +// since more logical slots will fit into a cache line. +// +// # Hashing +// +// We compute two separate hashes, `H1` and `H2`, from the hash of an object. +// `H1(hash(x))` is an index into `slots`, and essentially the starting point +// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out +// objects that cannot possibly be the one we are looking for. +// +// # Table operations. +// +// The key operations are `insert`, `find`, and `erase`. +// +// Since `insert` and `erase` are implemented in terms of `find`, we describe +// `find` first. To `find` a value `x`, we compute `hash(x)`. From +// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every +// group of slots in some interesting order. +// +// We now walk through these indices. 
At each index, we select the entire group +// starting with that index and extract potential candidates: occupied slots +// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the +// group, we stop and return an error. Each candidate slot `y` is compared with +// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the +// next probe index. Tombstones effectively behave like full slots that never +// match the value we're looking for. +// +// The `H2` bits ensure when we compare a slot to an object with `==`, we are +// likely to have actually found the object. That is, the chance is low that +// `==` is called and returns `false`. Thus, when we search for an object, we +// are unlikely to call `==` many times. This likelyhood can be analyzed as +// follows (assuming that H2 is a random enough hash function). +// +// Let's assume that there are `k` "wrong" objects that must be examined in a +// probe sequence. For example, when doing a `find` on an object that is in the +// table, `k` is the number of objects between the start of the probe sequence +// and the final found object (not including the final found object). The +// expected number of objects with an H2 match is then `k/128`. Measurements +// and analysis indicate that even at high load factors, `k` is less than 32, +// meaning that the number of "false positive" comparisons we must perform is +// less than 1/8 per `find`. + +// `insert` is implemented in terms of `unchecked_insert`, which inserts a +// value presumed to not be in the table (violating this requirement will cause +// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert +// it, we construct a `probe_seq` once again, and use it to find the first +// group with an unoccupied (empty *or* deleted) slot. We place `x` into the +// first such slot in the group and mark it as full with `x`'s H2. +// +// To `insert`, we compose `unchecked_insert` with `find`. 
We compute `h(x)` and +// perform a `find` to see if it's already present; if it is, we're done. If +// it's not, we may decide the table is getting overcrowded (i.e. the load +// factor is greater than 7/8 for big tables; `is_small()` tables use a max load +// factor of 1); in this case, we allocate a bigger array, `unchecked_insert` +// each element of the table into the new array (we know that no insertion here +// will insert an already-present value), and discard the old backing array. At +// this point, we may `unchecked_insert` the value `x`. +// +// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which +// presents a viable, initialized slot pointee to the caller. +// +// `erase` is implemented in terms of `erase_at`, which takes an index to a +// slot. Given an offset, we simply create a tombstone and destroy its contents. +// If we can prove that the slot would not appear in a probe sequence, we can +// make the slot as empty, instead. We can prove this by observing that if a +// group has any empty slots, it has never been full (assuming we never create +// an empty slot in a group with no empties, which this heuristic guarantees we +// never do) and find would stop at this group anyways (since it does not probe +// beyond groups with empties). +// +// `erase` is `erase_at` composed with `find`: if we +// have a value `x`, we can perform a `find`, and then `erase_at` the resulting +// slot. +// +// To iterate, we simply traverse the array, skipping empty and deleted slots +// and stopping when we hit a `kSentinel`. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/base/prefetch.h" +#include "absl/container/internal/common.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_policy_traits.h" +#include "absl/container/internal/hashtable_debug_hooks.h" +#include "absl/container/internal/hashtablez_sampler.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/numeric/bits.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_INTERNAL_HAVE_SSE2 +#include +#endif + +#ifdef ABSL_INTERNAL_HAVE_SSSE3 +#include +#endif + +#ifdef _MSC_VER +#include +#endif + +#ifdef ABSL_INTERNAL_HAVE_ARM_NEON +#include +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + +#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS +#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_MEMORY_SANITIZER) +// When compiled in sanitizer mode, we add generation integers to the backing +// array and iterators. In the backing array, we store the generation between +// the control bytes and the slots. When iterators are dereferenced, we assert +// that the container has not been mutated in a way that could cause iterator +// invalidation since the iterator was initialized. +#define ABSL_SWISSTABLE_ENABLE_GENERATIONS +#endif + + // We use uint8_t so we don't need to worry about padding. + using GenerationType = uint8_t; + + // A sentinel value for empty generations. 
Using 0 makes it easy to constexpr + // initialize an array of this value. + constexpr GenerationType SentinelEmptyGeneration() + { + return 0; + } + + constexpr GenerationType NextGeneration(GenerationType generation) + { + return ++generation == SentinelEmptyGeneration() ? ++generation : generation; + } + +#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS + constexpr bool SwisstableGenerationsEnabled() + { + return true; + } + constexpr size_t NumGenerationBytes() + { + return sizeof(GenerationType); + } +#else + constexpr bool SwisstableGenerationsEnabled() + { + return false; + } + constexpr size_t NumGenerationBytes() + { + return 0; + } +#endif + + template + void SwapAlloc(AllocType& lhs, AllocType& rhs, std::true_type /* propagate_on_container_swap */) + { + using std::swap; + swap(lhs, rhs); + } + template + void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, std::false_type /* propagate_on_container_swap */) + { + } + + // The state for a probe sequence. + // + // Currently, the sequence is a triangular progression of the form + // + // p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) + // + // The use of `Width` ensures that each probe step does not overlap groups; + // the sequence effectively outputs the addresses of *groups* (although not + // necessarily aligned to any boundary). The `Group` machinery allows us + // to check an entire group with minimal branching. + // + // Wrapping around at `mask + 1` is important, but not for the obvious reason. + // As described above, the first few entries of the control byte array + // are mirrored at the end of the array, which `Group` will find and use + // for selecting candidates. However, when those candidates' slots are + // actually inspected, there are no corresponding slots for the cloned bytes, + // so we need to make sure we've treated those offsets as "wrapping around". 
+ // + // It turns out that this probe sequence visits every group exactly once if the + // number of groups is a power of two, since (i^2+i)/2 is a bijection in + // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing + template + class probe_seq + { + public: + // Creates a new probe sequence using `hash` as the initial value of the + // sequence and `mask` (usually the capacity of the table) as the mask to + // apply to each value in the progression. + probe_seq(size_t hash, size_t mask) + { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + + // The offset within the table, i.e., the value `p(i)` above. + size_t offset() const + { + return offset_; + } + size_t offset(size_t i) const + { + return (offset_ + i) & mask_; + } + + void next() + { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index, a multiple of `Width`. + size_t index() const + { + return index_; + } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; + }; + + template + struct RequireUsableKey + { + template + std::pair< + decltype(std::declval()(std::declval())), + decltype(std::declval()(std::declval(), std::declval()))>* + operator()(const PassedKey&, const Args&...) const; + }; + + template + struct IsDecomposable : std::false_type + { + }; + + template + struct IsDecomposable< + absl::void_t(), + std::declval()... + ))>, + Policy, + Hash, + Eq, + Ts...> : std::true_type + { + }; + + // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. 
+ template + constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) + { + using std::swap; + return noexcept(swap(std::declval(), std::declval())); + } + template + constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) + { + return false; + } + + template + uint32_t TrailingZeros(T x) + { + ABSL_ASSUME(x != 0); + return static_cast(countr_zero(x)); + } + + // An abstract bitmask, such as that emitted by a SIMD instruction. + // + // Specifically, this type implements a simple bitset whose representation is + // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number + // of abstract bits in the bitset, while `Shift` is the log-base-two of the + // width of an abstract bit in the representation. + // This mask provides operations for any number of real bits set in an abstract + // bit. To add iteration on top of that, implementation must guarantee no more + // than one real bit is set in an abstract bit. + template + class NonIterableBitMask + { + public: + explicit NonIterableBitMask(T mask) : + mask_(mask) + { + } + + explicit operator bool() const + { + return this->mask_ != 0; + } + + // Returns the index of the lowest *abstract* bit set in `self`. + uint32_t LowestBitSet() const + { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Returns the index of the highest *abstract* bit set in `self`. + uint32_t HighestBitSet() const + { + return static_cast((bit_width(mask_) - 1) >> Shift); + } + + // Returns the number of trailing zero *abstract* bits. + uint32_t TrailingZeros() const + { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Returns the number of leading zero *abstract* bits. 
+ uint32_t LeadingZeros() const + { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; + } + + T mask_; + }; + + // Mask that can be iterable + // + // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just + // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When + // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as + // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. + // + // For example: + // for (int i : BitMask(0b101)) -> yields 0, 2 + // for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 + template + class BitMask : public NonIterableBitMask + { + using Base = NonIterableBitMask; + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + explicit BitMask(T mask) : + Base(mask) + { + } + // BitMask is an iterator over the indices of its abstract bits. + using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + BitMask& operator++() + { + this->mask_ &= (this->mask_ - 1); + return *this; + } + + uint32_t operator*() const + { + return Base::LowestBitSet(); + } + + BitMask begin() const + { + return *this; + } + BitMask end() const + { + return BitMask(0); + } + + private: + friend bool operator==(const BitMask& a, const BitMask& b) + { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) + { + return a.mask_ != b.mask_; + } + }; + + using h2_t = uint8_t; + + // The values here are selected for maximum performance. See the static asserts + // below for details. + + // A `ctrl_t` is a single control byte, which can have one of four + // states: empty, deleted, full (which has an associated seven-bit h2_t value) + // and the sentinel. 
They have the following bit patterns: + // + // empty: 1 0 0 0 0 0 0 0 + // deleted: 1 1 1 1 1 1 1 0 + // full: 0 h h h h h h h // h represents the hash bits. + // sentinel: 1 1 1 1 1 1 1 1 + // + // These values are specifically tuned for SSE-flavored SIMD. + // The static_asserts below detail the source of these choices. + // + // We use an enum class so that when strict aliasing is enabled, the compiler + // knows ctrl_t doesn't alias other types. + enum class ctrl_t : int8_t + { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 + }; + static_assert( + (static_cast(ctrl_t::kEmpty) & + static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x80) != 0, + "Special markers need to have the MSB to make checking for them efficient" + ); + static_assert( + ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, + "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " + "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient" + ); + static_assert( + ctrl_t::kSentinel == static_cast(-1), + "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)" + ); + static_assert(ctrl_t::kEmpty == static_cast(-128), "ctrl_t::kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); + static_assert( + (~static_cast(ctrl_t::kEmpty) & + ~static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x7F) != 0, + "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " + "shared by ctrl_t::kSentinel to make the scalar test for " + "MaskEmptyOrDeleted() efficient" + ); + static_assert(ctrl_t::kDeleted == static_cast(-2), "ctrl_t::kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + + // See definition comment for why this is size 32. 
+ ABSL_DLL extern const ctrl_t kEmptyGroup[32]; + + // Returns a pointer to a control byte group that can be used by empty tables. + inline ctrl_t* EmptyGroup() + { + // Const must be cast away here; no uses of this function will actually write + // to it, because it is only used for empty tables. + return const_cast(kEmptyGroup + 16); + } + + // Returns a pointer to a generation to use for an empty hashtable. + GenerationType* EmptyGeneration(); + + // Returns whether `generation` is a generation for an empty hashtable that + // could be returned by EmptyGeneration(). + inline bool IsEmptyGeneration(const GenerationType* generation) + { + return *generation == SentinelEmptyGeneration(); + } + + // Mixes a randomly generated per-process seed with `hash` and `ctrl` to + // randomize insertion order within groups. + bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); + + // Returns a per-table, hash salt, which changes on resize. This gets mixed into + // H1 to randomize iteration order per-table. + // + // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure + // non-determinism of iteration order in most cases. + inline size_t PerTableSalt(const ctrl_t* ctrl) + { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. + return reinterpret_cast(ctrl) >> 12; + } + // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. + inline size_t H1(size_t hash, const ctrl_t* ctrl) + { + return (hash >> 7) ^ PerTableSalt(ctrl); + } + + // Extracts the H2 portion of a hash: the 7 bits not used for H1. + // + // These are used as an occupied control byte. + inline h2_t H2(size_t hash) + { + return hash & 0x7F; + } + + // Helpers for checking the state of a control byte. 
+ inline bool IsEmpty(ctrl_t c) + { + return c == ctrl_t::kEmpty; + } + inline bool IsFull(ctrl_t c) + { + return c >= static_cast(0); + } + inline bool IsDeleted(ctrl_t c) + { + return c == ctrl_t::kDeleted; + } + inline bool IsEmptyOrDeleted(ctrl_t c) + { + return c < ctrl_t::kSentinel; + } + +#ifdef ABSL_INTERNAL_HAVE_SSE2 + // Quick reference guide for intrinsics used below: + // + // * __m128i: An XMM (128-bit) word. + // + // * _mm_setzero_si128: Returns a zero vector. + // * _mm_set1_epi8: Returns a vector with the same i8 in each lane. + // + // * _mm_subs_epi8: Saturating-subtracts two i8 vectors. + // * _mm_and_si128: Ands two i128s together. + // * _mm_or_si128: Ors two i128s together. + // * _mm_andnot_si128: And-nots two i128s together. + // + // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, + // filling each lane with 0x00 or 0xff. + // * _mm_cmpgt_epi8: Same as above, but using > rather than ==. + // + // * _mm_loadu_si128: Performs an unaligned load of an i128. + // * _mm_storeu_si128: Performs an unaligned store of an i128. + // + // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first + // argument if the corresponding lane of the second + // argument is positive, negative, or zero, respectively. + // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a + // bitmask consisting of those bits. + // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low + // four bits of each i8 lane in the second argument as + // indices. + + // https://github.com/abseil/abseil-cpp/issues/209 + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 + // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char + // Work around this by using the portable implementation of Group + // when using -funsigned-char under GCC. 
+ inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) + { +#if defined(__GNUC__) && !defined(__clang__) + if (std::is_unsigned::value) + { + const __m128i mask = _mm_set1_epi8(0x80); + const __m128i diff = _mm_subs_epi8(b, a); + return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); + } +#endif + return _mm_cmpgt_epi8(a, b); + } + + struct GroupSse2Impl + { + static constexpr size_t kWidth = 16; // the number of slots per group + + explicit GroupSse2Impl(const ctrl_t* pos) + { + ctrl = _mm_loadu_si128(reinterpret_cast(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + BitMask Match(h2_t hash) const + { + auto match = _mm_set1_epi8(static_cast(hash)); + return BitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))) + ); + } + + // Returns a bitmask representing the positions of empty slots. + NonIterableBitMask MaskEmpty() const + { +#ifdef ABSL_INTERNAL_HAVE_SSSE3 + // This only works because ctrl_t::kEmpty is -128. + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))) + ); +#else + auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))) + ); +#endif + } + + // Returns a bitmask representing the positions of empty or deleted slots. + NonIterableBitMask MaskEmptyOrDeleted() const + { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return NonIterableBitMask(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + )); + } + + // Returns the number of trailing empty or deleted elements in the group. 
+ uint32_t CountLeadingEmptyOrDeleted() const + { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return TrailingZeros(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1 + )); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const + { + auto msbs = _mm_set1_epi8(static_cast(-128)); + auto x126 = _mm_set1_epi8(126); +#ifdef ABSL_INTERNAL_HAVE_SSSE3 + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); +#else + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); +#endif + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } + + __m128i ctrl; + }; +#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 + +#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) + struct GroupAArch64Impl + { + static constexpr size_t kWidth = 8; + + explicit GroupAArch64Impl(const ctrl_t* pos) + { + ctrl = vld1_u8(reinterpret_cast(pos)); + } + + BitMask Match(h2_t hash) const + { + uint8x8_t dup = vdup_n_u8(hash); + auto mask = vceq_u8(ctrl, dup); + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask( + vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs + ); + } + + NonIterableBitMask MaskEmpty() const + { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vceq_s8(vdup_n_s8(static_cast(ctrl_t::kEmpty)), vreinterpret_s8_u8(ctrl))), 0); + return NonIterableBitMask(mask); + } + + NonIterableBitMask MaskEmptyOrDeleted() const + { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(vdup_n_s8(static_cast(ctrl_t::kSentinel)), vreinterpret_s8_u8(ctrl))), 0); + return NonIterableBitMask(mask); + } + + uint32_t CountLeadingEmptyOrDeleted() const + { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vcle_s8(vdup_n_s8(static_cast(ctrl_t::kSentinel)), vreinterpret_s8_u8(ctrl))), 0); + // Similar to MaskEmptyorDeleted() but we invert the logic to invert the + // produced bitfield. 
We then count number of trailing zeros. + // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, + // so we should be fine. + return static_cast(countr_zero(mask)) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const + { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t slsbs = 0x0202020202020202ULL; + constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL; + auto x = slsbs & (mask >> 6); + auto res = (x + midbs) | msbs; + little_endian::Store64(dst, res); + } + + uint8x8_t ctrl; + }; +#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN + + struct GroupPortableImpl + { + static constexpr size_t kWidth = 8; + + explicit GroupPortableImpl(const ctrl_t* pos) : + ctrl(little_endian::Load64(pos)) + { + } + + BitMask Match(h2_t hash) const + { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). 
+ // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask((x - lsbs) & ~x & msbs); + } + + NonIterableBitMask MaskEmpty() const + { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & (~ctrl << 6)) & msbs); + } + + NonIterableBitMask MaskEmptyOrDeleted() const + { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & (~ctrl << 7)) & msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const + { + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. + constexpr uint64_t bits = 0x0101010101010101ULL; + return static_cast(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const + { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; + }; + +#ifdef ABSL_INTERNAL_HAVE_SSE2 + using Group = GroupSse2Impl; +#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) + using Group = GroupAArch64Impl; +#else + using Group = GroupPortableImpl; +#endif + + // When there is an insertion with no reserved growth, we rehash with + // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a + // constant divided by capacity ensures that inserting N elements is still O(N) + // in the average case. 
Using the constant 16 means that we expect to rehash ~8 + // times more often than when generations are disabled. We are adding expected + // rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 - + // 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth. + inline size_t RehashProbabilityConstant() + { + return 16; + } + + class CommonFieldsGenerationInfoEnabled + { + // A sentinel value for reserved_growth_ indicating that we just ran out of + // reserved growth on the last insertion. When reserve is called and then + // insertions take place, reserved_growth_'s state machine is N, ..., 1, + // kReservedGrowthJustRanOut, 0. + static constexpr size_t kReservedGrowthJustRanOut = + (std::numeric_limits::max)(); + + public: + CommonFieldsGenerationInfoEnabled() = default; + CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that) : + reserved_growth_(that.reserved_growth_), + reservation_size_(that.reservation_size_), + generation_(that.generation_) + { + that.reserved_growth_ = 0; + that.reservation_size_ = 0; + that.generation_ = EmptyGeneration(); + } + CommonFieldsGenerationInfoEnabled& operator=( + CommonFieldsGenerationInfoEnabled&& + ) = default; + + // Whether we should rehash on insert in order to detect bugs of using invalid + // references. We rehash on the first insertion after reserved_growth_ reaches + // 0 after a call to reserve. We also do a rehash with low probability + // whenever reserved_growth_ is zero. 
+ bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl, size_t capacity) const; + void maybe_increment_generation_on_insert() + { + if (reserved_growth_ == kReservedGrowthJustRanOut) + reserved_growth_ = 0; + + if (reserved_growth_ > 0) + { + if (--reserved_growth_ == 0) + reserved_growth_ = kReservedGrowthJustRanOut; + } + else + { + *generation_ = NextGeneration(*generation_); + } + } + void reset_reserved_growth(size_t reservation, size_t size) + { + reserved_growth_ = reservation - size; + } + size_t reserved_growth() const + { + return reserved_growth_; + } + void set_reserved_growth(size_t r) + { + reserved_growth_ = r; + } + size_t reservation_size() const + { + return reservation_size_; + } + void set_reservation_size(size_t r) + { + reservation_size_ = r; + } + GenerationType generation() const + { + return *generation_; + } + void set_generation(GenerationType g) + { + *generation_ = g; + } + GenerationType* generation_ptr() const + { + return generation_; + } + void set_generation_ptr(GenerationType* g) + { + generation_ = g; + } + + private: + // The number of insertions remaining that are guaranteed to not rehash due to + // a prior call to reserve. Note: we store reserved growth in addition to + // reservation size because calls to erase() decrease size_ but don't decrease + // reserved growth. + size_t reserved_growth_ = 0; + // The maximum argument to reserve() since the container was cleared. We need + // to keep track of this, in addition to reserved growth, because we reset + // reserved growth to this when erase(begin(), end()) is called. + size_t reservation_size_ = 0; + // Pointer to the generation counter, which is used to validate iterators and + // is stored in the backing array between the control bytes and the slots. + // Note that we can't store the generation inside the container itself and + // keep a pointer to the container in the iterators because iterators must + // remain valid when the container is moved. 
+ // Note: we could derive this pointer from the control pointer, but it makes + // the code more complicated, and there's a benefit in having the sizes of + // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different, + // which is that tests are less likely to rely on the size remaining the same. + GenerationType* generation_ = EmptyGeneration(); + }; + + class CommonFieldsGenerationInfoDisabled + { + public: + CommonFieldsGenerationInfoDisabled() = default; + CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) = + default; + CommonFieldsGenerationInfoDisabled& operator=( + CommonFieldsGenerationInfoDisabled&& + ) = default; + + bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const + { + return false; + } + void maybe_increment_generation_on_insert() + { + } + void reset_reserved_growth(size_t, size_t) + { + } + size_t reserved_growth() const + { + return 0; + } + void set_reserved_growth(size_t) + { + } + size_t reservation_size() const + { + return 0; + } + void set_reservation_size(size_t) + { + } + GenerationType generation() const + { + return 0; + } + void set_generation(GenerationType) + { + } + GenerationType* generation_ptr() const + { + return nullptr; + } + void set_generation_ptr(GenerationType*) + { + } + }; + + class HashSetIteratorGenerationInfoEnabled + { + public: + HashSetIteratorGenerationInfoEnabled() = default; + explicit HashSetIteratorGenerationInfoEnabled( + const GenerationType* generation_ptr + ) : + generation_ptr_(generation_ptr), + generation_(*generation_ptr) + { + } + + GenerationType generation() const + { + return generation_; + } + void reset_generation() + { + generation_ = *generation_ptr_; + } + const GenerationType* generation_ptr() const + { + return generation_ptr_; + } + void set_generation_ptr(const GenerationType* ptr) + { + generation_ptr_ = ptr; + } + + private: + const GenerationType* generation_ptr_ = EmptyGeneration(); + GenerationType generation_ = 
*generation_ptr_; + }; + + class HashSetIteratorGenerationInfoDisabled + { + public: + HashSetIteratorGenerationInfoDisabled() = default; + explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) + { + } + + GenerationType generation() const + { + return 0; + } + void reset_generation() + { + } + const GenerationType* generation_ptr() const + { + return nullptr; + } + void set_generation_ptr(const GenerationType*) + { + } + }; + +#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS + using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled; + using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled; +#else + using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled; + using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled; +#endif + + // Returns whether `n` is a valid capacity (i.e., number of slots). + // + // A valid capacity is a non-zero integer `2^m - 1`. + inline bool IsValidCapacity(size_t n) + { + return ((n + 1) & n) == 0 && n > 0; + } + + // Computes the offset from the start of the backing allocation of the control + // bytes. growth_left is stored at the beginning of the backing array. + inline size_t ControlOffset() + { + return sizeof(size_t); + } + + // Returns the number of "cloned control bytes". + // + // This is the number of control bytes that are present both at the beginning + // of the control byte array and at the end, such that we can create a + // `Group::kWidth`-width probe window starting from any control byte. + constexpr size_t NumClonedBytes() + { + return Group::kWidth - 1; + } + + // Given the capacity of a table, computes the offset (from the start of the + // backing allocation) of the generation counter (if it exists). 
+ inline size_t GenerationOffset(size_t capacity) + { + assert(IsValidCapacity(capacity)); + const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); + return ControlOffset() + num_control_bytes; + } + + // Given the capacity of a table, computes the offset (from the start of the + // backing allocation) at which the slots begin. + inline size_t SlotOffset(size_t capacity, size_t slot_align) + { + assert(IsValidCapacity(capacity)); + return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) & + (~slot_align + 1); + } + + // Given the capacity of a table, computes the total size of the backing + // array. + inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) + { + return SlotOffset(capacity, slot_align) + capacity * slot_size; + } + + // CommonFields hold the fields in raw_hash_set that do not depend + // on template parameters. This allows us to conveniently pass all + // of this state to helper functions as a single argument. + class CommonFields : public CommonFieldsGenerationInfo + { + public: + CommonFields() = default; + + // Not copyable + CommonFields(const CommonFields&) = delete; + CommonFields& operator=(const CommonFields&) = delete; + + // Movable + CommonFields(CommonFields&& that) : + CommonFieldsGenerationInfo( + std::move(static_cast(that)) + ), + // Explicitly copying fields into "this" and then resetting "that" + // fields generates less code then calling absl::exchange per field. + control_(that.control()), + slots_(that.slot_array()), + capacity_(that.capacity()), + compressed_tuple_(that.size(), std::move(that.infoz())) + { + that.set_control(EmptyGroup()); + that.set_slots(nullptr); + that.set_capacity(0); + that.set_size(0); + } + CommonFields& operator=(CommonFields&&) = default; + + ctrl_t* control() const + { + return control_; + } + void set_control(ctrl_t* c) + { + control_ = c; + } + void* backing_array_start() const + { + // growth_left is stored before control bytes. 
+ assert(reinterpret_cast(control()) % alignof(size_t) == 0); + return control() - sizeof(size_t); + } + + // Note: we can't use slots() because Qt defines "slots" as a macro. + void* slot_array() const + { + return slots_; + } + void set_slots(void* s) + { + slots_ = s; + } + + // The number of filled slots. + size_t size() const + { + return compressed_tuple_.template get<0>(); + } + void set_size(size_t s) + { + compressed_tuple_.template get<0>() = s; + } + + // The total number of available slots. + size_t capacity() const + { + return capacity_; + } + void set_capacity(size_t c) + { + assert(c == 0 || IsValidCapacity(c)); + capacity_ = c; + } + + // The number of slots we can still fill without needing to rehash. + // This is stored in the heap allocation before the control bytes. + size_t growth_left() const + { + return *reinterpret_cast(backing_array_start()); + } + void set_growth_left(size_t gl) + { + *reinterpret_cast(backing_array_start()) = gl; + } + + HashtablezInfoHandle& infoz() + { + return compressed_tuple_.template get<1>(); + } + const HashtablezInfoHandle& infoz() const + { + return compressed_tuple_.template get<1>(); + } + + bool should_rehash_for_bug_detection_on_insert() const + { + return CommonFieldsGenerationInfo:: + should_rehash_for_bug_detection_on_insert(control(), capacity()); + } + void reset_reserved_growth(size_t reservation) + { + CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size()); + } + + // The size of the backing array allocation. + size_t alloc_size(size_t slot_size, size_t slot_align) const + { + return AllocSize(capacity(), slot_size, slot_align); + } + + // Returns the number of control bytes set to kDeleted. For testing only. 
+ size_t TombstonesCount() const + { + return static_cast( + std::count(control(), control() + capacity(), ctrl_t::kDeleted) + ); + } + + private: + // TODO(b/259599413): Investigate removing some of these fields: + // - control/slots can be derived from each other + // - we can use 6 bits for capacity since it's always a power of two minus 1 + + // The control bytes (and, also, a pointer near to the base of the backing + // array). + // + // This contains `capacity + 1 + NumClonedBytes()` entries, even + // when the table is empty (hence EmptyGroup). + // + // Note that growth_left is stored immediately before this pointer. + ctrl_t* control_ = EmptyGroup(); + + // The beginning of the slots, located at `SlotOffset()` bytes after + // `control`. May be null for empty tables. + void* slots_ = nullptr; + + size_t capacity_ = 0; + + // Bundle together size and HashtablezInfoHandle to ensure EBO for + // HashtablezInfoHandle when sampling is turned off. + absl::container_internal::CompressedTuple + compressed_tuple_{0u, HashtablezInfoHandle{}}; + }; + + template + class raw_hash_set; + + // Returns the next valid capacity after `n`. + inline size_t NextCapacity(size_t n) + { + assert(IsValidCapacity(n) || n == 0); + return n * 2 + 1; + } + + // Applies the following mapping to every byte in the control array: + // * kDeleted -> kEmpty + // * kEmpty -> kEmpty + // * _ -> kDeleted + // PRECONDITION: + // IsValidCapacity(capacity) + // ctrl[capacity] == ctrl_t::kSentinel + // ctrl[i] != ctrl_t::kSentinel for all i < capacity + void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); + + // Converts `n` into the next valid capacity, per `IsValidCapacity`. + inline size_t NormalizeCapacity(size_t n) + { + return n ? ~size_t{} >> countl_zero(n) : 1; + } + + // General notes on capacity/growth methods below: + // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an + // average of two empty slots per group. 
+ // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. + // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we + // never need to probe (the whole table fits in one group) so we don't need a + // load factor less than 1. + + // Given `capacity`, applies the load factor; i.e., it returns the maximum + // number of values we should put into the table before a resizing rehash. + inline size_t CapacityToGrowth(size_t capacity) + { + assert(IsValidCapacity(capacity)); + // `capacity*7/8` + if (Group::kWidth == 8 && capacity == 7) + { + // x-x/8 does not work when x==7. + return 6; + } + return capacity - capacity / 8; + } + + // Given `growth`, "unapplies" the load factor to find how large the capacity + // should be to stay within the load factor. + // + // This might not be a valid capacity and `NormalizeCapacity()` should be + // called on this. + inline size_t GrowthToLowerboundCapacity(size_t growth) + { + // `growth*8/7` + if (Group::kWidth == 8 && growth == 7) + { + // x+(x-1)/7 does not work when x==7. 
+ return 8; + } + return growth + static_cast((static_cast(growth) - 1) / 7); + } + + template + size_t SelectBucketCountForIterRange(InputIter first, InputIter last, size_t bucket_count) + { + if (bucket_count != 0) + { + return bucket_count; + } + using InputIterCategory = + typename std::iterator_traits::iterator_category; + if (std::is_base_of::value) + { + return GrowthToLowerboundCapacity( + static_cast(std::distance(first, last)) + ); + } + return 0; + } + + constexpr bool SwisstableDebugEnabled() + { +#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \ + ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG) + return true; +#else + return false; +#endif + } + + inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation, const GenerationType* generation_ptr, const char* operation) + { + if (!SwisstableDebugEnabled()) + return; + if (ctrl == nullptr) + { + ABSL_INTERNAL_LOG(FATAL, std::string(operation) + " called on end() iterator."); + } + if (ctrl == EmptyGroup()) + { + ABSL_INTERNAL_LOG(FATAL, std::string(operation) + " called on default-constructed iterator."); + } + if (SwisstableGenerationsEnabled()) + { + if (generation != *generation_ptr) + { + ABSL_INTERNAL_LOG(FATAL, std::string(operation) + " called on invalid iterator. The table could have " + "rehashed since this iterator was initialized."); + } + if (!IsFull(*ctrl)) + { + ABSL_INTERNAL_LOG( + FATAL, + std::string(operation) + + " called on invalid iterator. The element was likely erased." + ); + } + } + else + { + if (!IsFull(*ctrl)) + { + ABSL_INTERNAL_LOG( + FATAL, + std::string(operation) + + " called on invalid iterator. The element might have been erased " + "or the table might have rehashed. Consider running with " + "--config=asan to diagnose rehashing issues." + ); + } + } + } + + // Note that for comparisons, null/end iterators are valid. 
+ inline void AssertIsValidForComparison(const ctrl_t* ctrl, GenerationType generation, const GenerationType* generation_ptr) + { + if (!SwisstableDebugEnabled()) + return; + const bool ctrl_is_valid_for_comparison = + ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl); + if (SwisstableGenerationsEnabled()) + { + if (generation != *generation_ptr) + { + ABSL_INTERNAL_LOG(FATAL, "Invalid iterator comparison. The table could have " + "rehashed since this iterator was initialized."); + } + if (!ctrl_is_valid_for_comparison) + { + ABSL_INTERNAL_LOG( + FATAL, "Invalid iterator comparison. The element was likely erased." + ); + } + } + else + { + ABSL_HARDENING_ASSERT( + ctrl_is_valid_for_comparison && + "Invalid iterator comparison. The element might have been erased or " + "the table might have rehashed. Consider running with --config=asan to " + "diagnose rehashing issues." + ); + } + } + + // If the two iterators come from the same container, then their pointers will + // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice/versa. + // Note: we take slots by reference so that it's not UB if they're uninitialized + // as long as we don't read them (when ctrl is null). + inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, const void* const& slot_a, const void* const& slot_b) + { + // If either control byte is null, then we can't tell. + if (ctrl_a == nullptr || ctrl_b == nullptr) + return true; + const void* low_slot = slot_a; + const void* hi_slot = slot_b; + if (ctrl_a > ctrl_b) + { + std::swap(ctrl_a, ctrl_b); + std::swap(low_slot, hi_slot); + } + return ctrl_b < low_slot && low_slot <= hi_slot; + } + + // Asserts that two iterators come from the same container. + // Note: we take slots by reference so that it's not UB if they're uninitialized + // as long as we don't read them (when ctrl is null). 
+ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, const void* const& slot_a, const void* const& slot_b, const GenerationType* generation_ptr_a, const GenerationType* generation_ptr_b) + { + if (!SwisstableDebugEnabled()) + return; + const bool a_is_default = ctrl_a == EmptyGroup(); + const bool b_is_default = ctrl_b == EmptyGroup(); + if (a_is_default != b_is_default) + { + ABSL_INTERNAL_LOG( + FATAL, + "Invalid iterator comparison. Comparing default-constructed iterator " + "with non-default-constructed iterator." + ); + } + if (a_is_default && b_is_default) + return; + + if (SwisstableGenerationsEnabled()) + { + if (generation_ptr_a == generation_ptr_b) + return; + const bool a_is_empty = IsEmptyGeneration(generation_ptr_a); + const bool b_is_empty = IsEmptyGeneration(generation_ptr_b); + if (a_is_empty != b_is_empty) + { + ABSL_INTERNAL_LOG(FATAL, "Invalid iterator comparison. Comparing iterator from " + "a non-empty hashtable with an iterator from an empty " + "hashtable."); + } + if (a_is_empty && b_is_empty) + { + ABSL_INTERNAL_LOG(FATAL, "Invalid iterator comparison. Comparing iterators from " + "different empty hashtables."); + } + const bool a_is_end = ctrl_a == nullptr; + const bool b_is_end = ctrl_b == nullptr; + if (a_is_end || b_is_end) + { + ABSL_INTERNAL_LOG(FATAL, "Invalid iterator comparison. Comparing iterator with " + "an end() iterator from a different hashtable."); + } + ABSL_INTERNAL_LOG(FATAL, "Invalid iterator comparison. Comparing non-end() " + "iterators from different hashtables."); + } + else + { + ABSL_HARDENING_ASSERT( + AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) && + "Invalid iterator comparison. The iterators may be from different " + "containers or the container might have rehashed. Consider running " + "with --config=asan to diagnose rehashing issues." + ); + } + } + + struct FindInfo + { + size_t offset; + size_t probe_length; + }; + + // Whether a table is "small". 
A small table fits entirely into a probing + // group, i.e., has a capacity < `Group::kWidth`. + // + // In small mode we are able to use the whole capacity. The extra control + // bytes give us at least one "empty" control byte to stop the iteration. + // This is important to make 1 a valid capacity. + // + // In small mode only the first `capacity` control bytes after the sentinel + // are valid. The rest contain dummy ctrl_t::kEmpty values that do not + // represent a real slot. This is important to take into account on + // `find_first_non_full()`, where we never try + // `ShouldInsertBackwards()` for small tables. + inline bool is_small(size_t capacity) + { + return capacity < Group::kWidth - 1; + } + + // Begins a probing operation on `common.control`, using `hash`. + inline probe_seq probe(const ctrl_t* ctrl, const size_t capacity, size_t hash) + { + return probe_seq(H1(hash, ctrl), capacity); + } + inline probe_seq probe(const CommonFields& common, size_t hash) + { + return probe(common.control(), common.capacity(), hash); + } + + // Probes an array of control bits using a probe sequence derived from `hash`, + // and returns the offset corresponding to the first deleted or empty slot. + // + // Behavior when the entire table is full is undefined. + // + // NOTE: this function must work with tables having both empty and deleted + // slots in the same group. Such tables appear during `erase()`. + template + inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) + { + auto seq = probe(common, hash); + const ctrl_t* ctrl = common.control(); + while (true) + { + Group g{ctrl + seq.offset()}; + auto mask = g.MaskEmptyOrDeleted(); + if (mask) + { +#if !defined(NDEBUG) + // We want to add entropy even when ASLR is not enabled. + // In debug build we will randomly insert in either the front or back of + // the group. 
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing + if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) + { + return {seq.offset(mask.HighestBitSet()), seq.index()}; + } +#endif + return {seq.offset(mask.LowestBitSet()), seq.index()}; + } + seq.next(); + assert(seq.index() <= common.capacity() && "full table!"); + } + } + + // Extern template for inline function keep possibility of inlining. + // When compiler decided to not inline, no symbols will be added to the + // corresponding translation unit. + extern template FindInfo find_first_non_full(const CommonFields&, size_t); + + // Non-inlined version of find_first_non_full for use in less + // performance critical routines. + FindInfo find_first_non_full_outofline(const CommonFields&, size_t); + + inline void ResetGrowthLeft(CommonFields& common) + { + common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size()); + } + + // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire + // array as marked as empty. + inline void ResetCtrl(CommonFields& common, size_t slot_size) + { + const size_t capacity = common.capacity(); + ctrl_t* ctrl = common.control(); + std::memset(ctrl, static_cast(ctrl_t::kEmpty), capacity + 1 + NumClonedBytes()); + ctrl[capacity] = ctrl_t::kSentinel; + SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity); + ResetGrowthLeft(common); + } + + // Sets `ctrl[i]` to `h`. + // + // Unlike setting it directly, this function will perform bounds checks and + // mirror the value to the cloned tail if necessary. 
+ inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h, size_t slot_size) + { + const size_t capacity = common.capacity(); + assert(i < capacity); + + auto* slot_i = static_cast(common.slot_array()) + i * slot_size; + if (IsFull(h)) + { + SanitizerUnpoisonMemoryRegion(slot_i, slot_size); + } + else + { + SanitizerPoisonMemoryRegion(slot_i, slot_size); + } + + ctrl_t* ctrl = common.control(); + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; + } + + // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. + inline void SetCtrl(const CommonFields& common, size_t i, h2_t h, size_t slot_size) + { + SetCtrl(common, i, static_cast(h), slot_size); + } + + // growth_left (which is a size_t) is stored with the backing array. + constexpr size_t BackingArrayAlignment(size_t align_of_slot) + { + return (std::max)(align_of_slot, alignof(size_t)); + } + + template + ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) + { + assert(c.capacity()); + // Folks with custom allocators often make unwarranted assumptions about the + // behavior of their classes vis-a-vis trivial destructability and what + // calls they will or won't make. Avoid sampling for people with custom + // allocators to get us out of this mess. This is not a hard guarantee but + // a workaround while we plan the exact guarantee we want to provide. + const size_t sample_size = + (std::is_same>::value && + c.slot_array() == nullptr) ? + SizeOfSlot : + 0; + + const size_t cap = c.capacity(); + const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot); + // growth_left (which is a size_t) is stored with the backing array. 
+ char* mem = static_cast( + Allocate(&alloc, alloc_size) + ); + const GenerationType old_generation = c.generation(); + c.set_generation_ptr( + reinterpret_cast(mem + GenerationOffset(cap)) + ); + c.set_generation(NextGeneration(old_generation)); + c.set_control(reinterpret_cast(mem + ControlOffset())); + c.set_slots(mem + SlotOffset(cap, AlignOfSlot)); + ResetCtrl(c, SizeOfSlot); + if (sample_size) + { + c.infoz() = Sample(sample_size); + } + c.infoz().RecordStorageChanged(c.size(), cap); + } + + // PolicyFunctions bundles together some information for a particular + // raw_hash_set instantiation. This information is passed to + // type-erased functions that want to do small amounts of type-specific + // work. + struct PolicyFunctions + { + size_t slot_size; + + // Returns the hash of the pointed-to slot. + size_t (*hash_slot)(void* set, void* slot); + + // Transfer the contents of src_slot to dst_slot. + void (*transfer)(void* set, void* dst_slot, void* src_slot); + + // Deallocate the backing store from common. + void (*dealloc)(CommonFields& common, const PolicyFunctions& policy); + }; + + // ClearBackingArray clears the backing array, either modifying it in place, + // or creating a new one based on the value of "reuse". + // REQUIRES: c.capacity > 0 + void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, bool reuse); + + // Type-erased version of raw_hash_set::erase_meta_only. + void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size); + + // Function to place in PolicyFunctions::dealloc for raw_hash_sets + // that are using std::allocator. This allows us to share the same + // function body for raw_hash_set instantiations that have the + // same slot alignment. + template + ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common, const PolicyFunctions& policy) + { + // Unpoison before returning the memory to the allocator. 
+ SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size * common.capacity()); + + std::allocator alloc; + Deallocate( + &alloc, common.backing_array_start(), common.alloc_size(policy.slot_size, AlignOfSlot) + ); + } + + // For trivially relocatable types we use memcpy directly. This allows us to + // share the same function body for raw_hash_set instantiations that have the + // same slot size as long as they are relocatable. + template + ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) + { + memcpy(dst, src, SizeOfSlot); + } + + // Type-erased version of raw_hash_set::drop_deletes_without_resize. + void DropDeletesWithoutResize(CommonFields& common, const PolicyFunctions& policy, void* tmp_space); + + // A SwissTable. + // + // Policy: a policy defines how to perform different operations on + // the slots of the hashtable (see hash_policy_traits.h for the full interface + // of policy). + // + // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The + // functor should accept a key and return size_t as hash. For best performance + // it is important that the hash function provides high entropy across all bits + // of the hash. + // + // Eq: a (possibly polymorphic) functor that compares two keys for equality. It + // should accept two (of possibly different type) keys and return a bool: true + // if they are equal, false if they are not. If two keys compare equal, then + // their hash values as defined by Hash MUST be equal. + // + // Allocator: an Allocator + // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which + // the storage of the hashtable will be allocated and the elements will be + // constructed and destroyed. 
+ template + class raw_hash_set + { + using PolicyTraits = hash_policy_traits; + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using init_type = typename PolicyTraits::init_type; + using key_type = typename PolicyTraits::key_type; + // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user + // code fixes! + using slot_type = typename PolicyTraits::slot_type; + using allocator_type = Alloc; + using size_type = size_t; + using difference_type = ptrdiff_t; + using hasher = Hash; + using key_equal = Eq; + using policy_type = Policy; + using value_type = typename PolicyTraits::value_type; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::pointer; + using const_pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::const_pointer; + + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = typename KeyArgImpl::template type; + + private: + // Give an early error when key_type is not hashable/eq. 
+ auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using AllocTraits = absl::allocator_traits; + using SlotAlloc = typename absl::allocator_traits< + allocator_type>::template rebind_alloc; + using SlotAllocTraits = typename absl::allocator_traits< + allocator_type>::template rebind_traits; + + static_assert(std::is_lvalue_reference::value, "Policy::element() must return a reference"); + + template + struct SameAsElementReference : std::is_same::type>::type, typename std::remove_cv::type>::type> + { + }; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template + using RequiresInsertable = typename std::enable_if< + absl::disjunction, SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. + template + using RequiresNotInit = + typename std::enable_if::value, int>::type; + + template + using IsDecomposable = IsDecomposable; + + public: + static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); + static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); + + class iterator : private HashSetIteratorGenerationInfo + { + friend class raw_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename raw_hash_set::value_type; + using reference = + absl::conditional_t; + using pointer = absl::remove_reference_t*; + using difference_type = typename raw_hash_set::difference_type; + + iterator() + { + } + + // PRECONDITION: not an end() iterator. 
+ reference operator*() const + { + AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()"); + return PolicyTraits::element(slot_); + } + + // PRECONDITION: not an end() iterator. + pointer operator->() const + { + AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->"); + return &operator*(); + } + + // PRECONDITION: not an end() iterator. + iterator& operator++() + { + AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++"); + ++ctrl_; + ++slot_; + skip_empty_or_deleted(); + return *this; + } + // PRECONDITION: not an end() iterator. + iterator operator++(int) + { + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) + { + AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr()); + AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr()); + AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_, a.generation_ptr(), b.generation_ptr()); + return a.ctrl_ == b.ctrl_; + } + friend bool operator!=(const iterator& a, const iterator& b) + { + return !(a == b); + } + + private: + iterator(ctrl_t* ctrl, slot_type* slot, const GenerationType* generation_ptr) : + HashSetIteratorGenerationInfo(generation_ptr), + ctrl_(ctrl), + slot_(slot) + { + // This assumption helps the compiler know that any non-end iterator is + // not equal to any end iterator. + ABSL_ASSUME(ctrl != nullptr); + } + // For end() iterators. + explicit iterator(const GenerationType* generation_ptr) : + HashSetIteratorGenerationInfo(generation_ptr), + ctrl_(nullptr) + { + } + + // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until + // they reach one. + // + // If a sentinel is reached, we null `ctrl_` out instead. 
+ void skip_empty_or_deleted() + { + while (IsEmptyOrDeleted(*ctrl_)) + { + uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); + ctrl_ += shift; + slot_ += shift; + } + if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) + ctrl_ = nullptr; + } + + // We use EmptyGroup() for default-constructed iterators so that they can + // be distinguished from end iterators, which have nullptr ctrl_. + ctrl_t* ctrl_ = EmptyGroup(); + // To avoid uninitialized member warnings, put slot_ in an anonymous union. + // The member is not initialized on singleton and end iterators. + union + { + slot_type* slot_; + }; + }; + + class const_iterator + { + friend class raw_hash_set; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename raw_hash_set::value_type; + using reference = typename raw_hash_set::const_reference; + using pointer = typename raw_hash_set::const_pointer; + using difference_type = typename raw_hash_set::difference_type; + + const_iterator() = default; + // Implicit construction from iterator. + const_iterator(iterator i) : + inner_(std::move(i)) + { + } // NOLINT + + reference operator*() const + { + return *inner_; + } + pointer operator->() const + { + return inner_.operator->(); + } + + const_iterator& operator++() + { + ++inner_; + return *this; + } + const_iterator operator++(int) + { + return inner_++; + } + + friend bool operator==(const const_iterator& a, const const_iterator& b) + { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) + { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot, const GenerationType* gen) : + inner_(const_cast(ctrl), const_cast(slot), gen) + { + } + + iterator inner_; + }; + + using node_type = node_handle, Alloc>; + using insert_return_type = InsertReturnType; + + // Note: can't use `= default` due to non-default noexcept (causes + // problems for some compilers). 
NOLINTNEXTLINE + raw_hash_set() noexcept( + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value + ) + { + } + + ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set( + size_t bucket_count, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type() + ) : + settings_(CommonFields{}, hash, eq, alloc) + { + if (bucket_count) + { + common().set_capacity(NormalizeCapacity(bucket_count)); + initialize_slots(); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(bucket_count, hash, key_equal(), alloc) + { + } + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(bucket_count, hasher(), key_equal(), alloc) + { + } + + explicit raw_hash_set(const allocator_type& alloc) : + raw_hash_set(0, hasher(), key_equal(), alloc) + { + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : + raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), hash, eq, alloc) + { + insert(first, last); + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) + { + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) + { + } + + template + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) : + raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) + { + } + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept 
std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // absl::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // absl::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : + raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) + { + } + + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : + raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) + { + } + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(init, bucket_count, hash, key_equal(), alloc) + { + } + + raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(init, bucket_count, hash, key_equal(), alloc) + { + } + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) + { + } + + raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(init, 
bucket_count, hasher(), key_equal(), alloc) + { + } + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, const allocator_type& alloc) : + raw_hash_set(init, 0, hasher(), key_equal(), alloc) + { + } + + raw_hash_set(std::initializer_list init, const allocator_type& alloc) : + raw_hash_set(init, 0, hasher(), key_equal(), alloc) + { + } + + raw_hash_set(const raw_hash_set& that) : + raw_hash_set(that, AllocTraits::select_on_container_copy_construction(that.alloc_ref())) + { + } + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) : + raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) + { + const size_t size = that.size(); + if (size == 0) + return; + reserve(size); + // Because the table is guaranteed to be empty, we can do something faster + // than a full `insert`. + for (const auto& v : that) + { + const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); + auto target = find_first_non_full_outofline(common(), hash); + SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); + emplace_at(target.offset, v); + common().maybe_increment_generation_on_insert(); + infoz().RecordInsert(hash, target.probe_length); + } + common().set_size(size); + set_growth_left(growth_left() - size); + } + + ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept( + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value + ) : + // Hash, equality and allocator are copied instead of moved because + // `that` must be left valid. If Hash is std::function, moving it + // would create a nullptr functor that cannot be called. 
+ settings_(absl::exchange(that.common(), CommonFields{}), that.hash_ref(), that.eq_ref(), that.alloc_ref()) + { + } + + raw_hash_set(raw_hash_set&& that, const allocator_type& a) : + settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) + { + if (a == that.alloc_ref()) + { + std::swap(common(), that.common()); + } + else + { + reserve(that.size()); + // Note: this will copy elements of dense_set and unordered_set instead of + // moving them. This can be fixed if it ever becomes an issue. + for (auto& elem : that) + insert(std::move(elem)); + } + } + + raw_hash_set& operator=(const raw_hash_set& that) + { + raw_hash_set tmp(that, AllocTraits::propagate_on_container_copy_assignment::value ? that.alloc_ref() : alloc_ref()); + swap(tmp); + return *this; + } + + raw_hash_set& operator=(raw_hash_set&& that) noexcept( + absl::allocator_traits::is_always_equal::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_assignable::value + ) + { + // TODO(sbenza): We should only use the operations from the noexcept clause + // to make sure we actually adhere to that contract. + // NOLINTNEXTLINE: not returning *this for performance. + return move_assign( + std::move(that), + typename AllocTraits::propagate_on_container_move_assignment() + ); + } + + ~raw_hash_set() + { + const size_t cap = capacity(); + if (!cap) + return; + destroy_slots(); + + // Unpoison before returning the memory to the allocator. 
+ SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap); + Deallocate( + &alloc_ref(), common().backing_array_start(), AllocSize(cap, sizeof(slot_type), alignof(slot_type)) + ); + + infoz().Unregister(); + } + + iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto it = iterator_at(0); + it.skip_empty_or_deleted(); + return it; + } + iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return iterator(common().generation_ptr()); + } + + const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return const_cast(this)->begin(); + } + const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return iterator(common().generation_ptr()); + } + const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return begin(); + } + const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return end(); + } + + bool empty() const + { + return !size(); + } + size_t size() const + { + return common().size(); + } + size_t capacity() const + { + return common().capacity(); + } + size_t max_size() const + { + return (std::numeric_limits::max)(); + } + + ABSL_ATTRIBUTE_REINITIALIZES void clear() + { + // Iterating over this container is O(bucket_count()). When bucket_count() + // is much greater than size(), iteration becomes prohibitively expensive. + // For clear() it is more important to reuse the allocated array when the + // container is small because allocation takes comparatively long time + // compared to destruction of the elements of the container. So we pick the + // largest bucket_count() threshold for which iteration is still fast and + // past that we simply deallocate the array. + const size_t cap = capacity(); + if (cap == 0) + { + // Already guaranteed to be empty; so nothing to do. 
+ } + else + { + destroy_slots(); + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128); + } + common().set_reserved_growth(0); + common().set_reservation_size(0); + } + + inline void destroy_slots() + { + const size_t cap = capacity(); + const ctrl_t* ctrl = control(); + slot_type* slot = slot_array(); + for (size_t i = 0; i != cap; ++i) + { + if (IsFull(ctrl[i])) + { + PolicyTraits::destroy(&alloc_ref(), slot + i); + } + } + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable type other than init_type. + // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> + std::pair insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. + // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + template< + class T, + RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. + // + // flat_hash_map s; + // s.insert({"abc", 42}); + std::pair insert(init_type&& value) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return emplace(std::move(value)); + } + + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. 
+ template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> + iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert(std::forward(value)).first; + } + + template< + class T, + RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert(value).first; + } + + iterator insert(const_iterator, init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) + { + for (; first != last; ++first) + emplace(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) + { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) + { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (!node) + return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); + auto res = PolicyTraits::apply( + InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, + elem + ); + if (res.second) + { + CommonAccess::Reset(&node); + return {res.first, true, node_type()}; + } + else + { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator, node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto res = insert(std::move(node)); + node = std::move(res.node); + return res.position; + } + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + template::value, int>::type = 0> + std::pair emplace(Args&&... 
args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return PolicyTraits::apply(EmplaceDecomposable{*this}, std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + template::value, int>::type = 0> + std::pair emplace(Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + alignas(slot_type) unsigned char raw[sizeof(slot_type)]; + slot_type* slot = reinterpret_cast(&raw); + + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return emplace(std::forward(args)...).first; + } + + // Extension API: support for lazy emplace. + // + // Looks up key in the table. If found, returns the iterator to the element. + // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`, + // and returns an iterator to the new element. + // + // `f` must abide by several restrictions: + // - it MUST call `raw_hash_set::constructor` with arguments as if a + // `raw_hash_set::value_type` is constructed, + // - it MUST NOT access the container before the call to + // `raw_hash_set::constructor`, and + // - it MUST NOT erase the lazily emplaced element. + // Doing any of these is undefined behavior. + // + // For example: + // + // std::unordered_set s; + // // Makes ArenaStr even if "abc" is in the map. + // s.insert(ArenaString(&arena, "abc")); + // + // flat_hash_set s; + // // Makes ArenaStr only if "abc" is not in the map. + // s.lazy_emplace("abc", [&](const constructor& ctor) { + // ctor(&arena, "abc"); + // }); + // + // WARNING: This API is currently experimental. If there is a way to implement + // the same thing with the rest of the API, prefer that. 
+ class constructor + { + friend class raw_hash_set; + + public: + template + void operator()(Args&&... args) const + { + assert(*slot_); + PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); + *slot_ = nullptr; + } + + private: + constructor(allocator_type* a, slot_type** slot) : + alloc_(a), + slot_(slot) + { + } + + allocator_type* alloc_; + slot_type** slot_; + }; + + template + iterator lazy_emplace(const key_arg& key, F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto res = find_or_prepare_insert(key); + if (res.second) + { + slot_type* slot = slot_array() + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + template + size_type erase(const key_arg& key) + { + auto it = find(key); + if (it == end()) + return 0; + erase(it); + return 1; + } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). The + // iterator is invalidated, so any increment should be done before calling + // erase. In order to erase while iterating across a map, use the following + // idiom (which also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // // `erase()` will invalidate `it`, so advance `it` first. + // auto copy_it = it++; + // if () { + // m.erase(copy_it); + // } + // } + void erase(const_iterator cit) + { + erase(cit.inner_); + } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. 
+ void erase(iterator it) + { + AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()"); + PolicyTraits::destroy(&alloc_ref(), it.slot_); + erase_meta_only(it); + } + + iterator erase(const_iterator first, const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + // We check for empty first because ClearBackingArray requires that + // capacity() > 0 as a precondition. + if (empty()) + return end(); + if (first == begin() && last == end()) + { + // TODO(ezb): we access control bytes in destroy_slots so it could make + // sense to combine destroy_slots and ClearBackingArray to avoid cache + // misses when the table is large. Note that we also do this in clear(). + destroy_slots(); + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true); + common().set_reserved_growth(common().reservation_size()); + return end(); + } + while (first != last) + { + erase(first++); + } + return last.inner_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. + template + void merge(raw_hash_set& src) + { // NOLINT + assert(this != &src); + for (auto it = src.begin(), e = src.end(); it != e;) + { + auto next = std::next(it); + if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, PolicyTraits::element(it.slot_)) + .second) + { + src.erase_meta_only(it); + } + it = next; + } + } + + template + void merge(raw_hash_set&& src) + { + merge(src); + } + + node_type extract(const_iterator position) + { + AssertIsFull(position.inner_.ctrl_, position.inner_.generation(), position.inner_.generation_ptr(), "extract()"); + auto node = + CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); + erase_meta_only(position); + return node; + } + + template< + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) + { + auto it = find(key); + return it == end() ? 
node_type() : extract(const_iterator{it}); + } + + void swap(raw_hash_set& that) noexcept( + IsNoThrowSwappable() && IsNoThrowSwappable() && + IsNoThrowSwappable( + typename AllocTraits::propagate_on_container_swap{} + ) + ) + { + using std::swap; + swap(common(), that.common()); + swap(hash_ref(), that.hash_ref()); + swap(eq_ref(), that.eq_ref()); + SwapAlloc(alloc_ref(), that.alloc_ref(), typename AllocTraits::propagate_on_container_swap{}); + } + + void rehash(size_t n) + { + if (n == 0 && capacity() == 0) + return; + if (n == 0 && size() == 0) + { + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false); + return; + } + + // bitor is a faster way of doing `max` here. We will round up to the next + // power-of-2-minus-1, so bitor is good enough. + auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); + // n == 0 unconditionally rehashes as per the standard. + if (n == 0 || m > capacity()) + { + resize(m); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + } + + void reserve(size_t n) + { + if (n > size() + growth_left()) + { + size_t m = GrowthToLowerboundCapacity(n); + resize(NormalizeCapacity(m)); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + common().reset_reserved_growth(n); + common().set_reservation_size(n); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + template + size_t count(const key_arg& key) const + { + return find(key) == end() ? 0 : 1; + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. 
+ // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. + template + void prefetch(const key_arg& key) const + { + (void)key; + // Avoid probing if we won't be able to prefetch the addresses received. +#ifdef ABSL_HAVE_PREFETCH + prefetch_heap_block(); + auto seq = probe(common(), hash_ref()(key)); + PrefetchToLocalCache(control() + seq.offset()); + PrefetchToLocalCache(slot_array() + seq.offset()); +#endif // ABSL_HAVE_PREFETCH + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. + template + iterator find(const key_arg& key, size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto seq = probe(common(), hash); + slot_type* slot_ptr = slot_array(); + const ctrl_t* ctrl = control(); + while (true) + { + Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) + { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slot_ptr + seq.offset(i)) + ))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) + return end(); + seq.next(); + assert(seq.index() <= capacity() && "full table!"); + } + } + template + iterator find(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + const_iterator find(const key_arg& key, size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return const_cast(this)->find(key, hash); + } + template + const_iterator find(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND + { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + bool contains(const key_arg& key) const + { + return find(key) != end(); + } + + template + std::pair equal_range(const key_arg& key) + 
ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto it = find(key); + if (it != end()) + return {it, std::next(it)}; + return {it, it}; + } + template + std::pair equal_range( + const key_arg& key + ) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + auto it = find(key); + if (it != end()) + return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const + { + return capacity(); + } + float load_factor() const + { + return capacity() ? static_cast(size()) / capacity() : 0.0; + } + float max_load_factor() const + { + return 1.0f; + } + void max_load_factor(float) + { + // Does nothing. + } + + hasher hash_function() const + { + return hash_ref(); + } + key_equal key_eq() const + { + return eq_ref(); + } + allocator_type get_allocator() const + { + return alloc_ref(); + } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) + { + if (a.size() != b.size()) + return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) + std::swap(outer, inner); + for (const value_type& elem : *outer) + if (!inner->has_element(elem)) + return false; + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) + { + return !(a == b); + } + + template + friend typename std::enable_if::value, H>::type + AbslHashValue(H h, const raw_hash_set& s) + { + return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), s.size()); + } + + friend void swap(raw_hash_set& a, raw_hash_set& b) noexcept(noexcept(a.swap(b))) + { + a.swap(b); + } + + private: + template + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + struct FindElement + { + template + const_iterator operator()(const K& key, Args&&...) const + { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement + { + template + size_t operator()(const K& key, Args&&...) 
const + { + return h(key); + } + const hasher& h; + }; + + template + struct EqualElement + { + template + bool operator()(const K2& lhs, Args&&...) const + { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + struct EmplaceDecomposable + { + template + std::pair operator()(const K& key, Args&&... args) const + { + auto res = s.find_or_prepare_insert(key); + if (res.second) + { + s.emplace_at(res.first, std::forward(args)...); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + }; + + template + struct InsertSlot + { + template + std::pair operator()(const K& key, Args&&...) && + { + auto res = s.find_or_prepare_insert(key); + if (res.second) + { + PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first, &slot); + } + else if (do_destroy) + { + PolicyTraits::destroy(&s.alloc_ref(), &slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. + slot_type&& slot; + }; + + // Erases, but does not destroy, the value pointed to by `it`. + // + // This merely updates the pertinent control byte. This can be used in + // conjunction with Policy::transfer to move the object to another place. + void erase_meta_only(const_iterator it) + { + EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type)); + } + + // Allocates a backing array for `self` and initializes its control bytes. + // This reads `capacity` and updates all other fields based on the result of + // the allocation. + // + // This does not free the currently held array; `capacity` must be nonzero. + inline void initialize_slots() + { + // People are often sloppy with the exact type of their allocator (sometimes + // it has an extra const or is missing the pair, but rebinds made it work + // anyway). 
+ using CharAlloc = + typename absl::allocator_traits::template rebind_alloc; + InitializeSlots( + common(), CharAlloc(alloc_ref()) + ); + } + + ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) + { + assert(IsValidCapacity(new_capacity)); + auto* old_ctrl = control(); + auto* old_slots = slot_array(); + const size_t old_capacity = common().capacity(); + common().set_capacity(new_capacity); + initialize_slots(); + + auto* new_slots = slot_array(); + size_t total_probe_length = 0; + for (size_t i = 0; i != old_capacity; ++i) + { + if (IsFull(old_ctrl[i])) + { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, PolicyTraits::element(old_slots + i)); + auto target = find_first_non_full(common(), hash); + size_t new_i = target.offset; + total_probe_length += target.probe_length; + SetCtrl(common(), new_i, H2(hash), sizeof(slot_type)); + PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i); + } + } + if (old_capacity) + { + SanitizerUnpoisonMemoryRegion(old_slots, sizeof(slot_type) * old_capacity); + Deallocate( + &alloc_ref(), old_ctrl - ControlOffset(), AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)) + ); + } + infoz().RecordRehash(total_probe_length); + } + + // Prunes control bytes to remove as many tombstones as possible. + // + // See the comment on `rehash_and_grow_if_necessary()`. + inline void drop_deletes_without_resize() + { + // Stack-allocate space for swapping elements. + alignas(slot_type) unsigned char tmp[sizeof(slot_type)]; + DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp); + } + + // Called whenever the table *might* need to conditionally grow. + // + // This function is an optimization opportunity to perform a rehash even when + // growth is unnecessary, because vacating tombstones is beneficial for + // performance in the long-run. 
+ void rehash_and_grow_if_necessary() + { + const size_t cap = capacity(); + if (cap > Group::kWidth && + // Do these calculations in 64-bit to avoid overflow. + size() * uint64_t{32} <= cap * uint64_t{25}) + { + // Squash DELETED without growing if there is enough capacity. + // + // Rehash in place if the current size is <= 25/32 of capacity. + // Rationale for such a high factor: 1) drop_deletes_without_resize() is + // faster than resize, and 2) it takes quite a bit of work to add + // tombstones. In the worst case, seems to take approximately 4 + // insert/erase pairs to create a single tombstone and so if we are + // rehashing because of tombstones, we can afford to rehash-in-place as + // long as we are reclaiming at least 1/8 the capacity without doing more + // than 2X the work. (Where "work" is defined to be size() for rehashing + // or rehashing in place, and 1 for an insert or erase.) But rehashing in + // place is faster per operation than inserting or even doubling the size + // of the table, so we actually afford to reclaim even less space from a + // resize-in-place. The decision is to rehash in place if we can reclaim + // at about 1/8th of the usable capacity (specifically 3/28 of the + // capacity) which means that the total cost of rehashing will be a small + // fraction of the total work. + // + // Here is output of an experiment using the BM_CacheInSteadyState + // benchmark running the old case (where we rehash-in-place only if we can + // reclaim at least 7/16*capacity) vs. this code (which rehashes in place + // if we can recover 3/32*capacity). + // + // Note that although in the worst-case number of rehashes jumped up from + // 15 to 190, but the number of operations per second is almost the same. + // + // Abridged output of running BM_CacheInSteadyState benchmark from + // raw_hash_set_benchmark. N is the number of insert/erase operations. 
+ // + // | OLD (recover >= 7/16 | NEW (recover >= 3/32) + // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes + // 448 | 145284 0.44 18 | 140118 0.44 19 + // 493 | 152546 0.24 11 | 151417 0.48 28 + // 538 | 151439 0.26 11 | 151152 0.53 38 + // 583 | 151765 0.28 11 | 150572 0.57 50 + // 628 | 150241 0.31 11 | 150853 0.61 66 + // 672 | 149602 0.33 12 | 150110 0.66 90 + // 717 | 149998 0.35 12 | 149531 0.70 129 + // 762 | 149836 0.37 13 | 148559 0.74 190 + // 807 | 149736 0.39 14 | 151107 0.39 14 + // 852 | 150204 0.42 15 | 151019 0.42 15 + drop_deletes_without_resize(); + } + else + { + // Otherwise grow the container. + resize(NextCapacity(cap)); + } + } + + bool has_element(const value_type& elem) const + { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); + auto seq = probe(common(), hash); + const ctrl_t* ctrl = control(); + while (true) + { + Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) + { + if (ABSL_PREDICT_TRUE( + PolicyTraits::element(slot_array() + seq.offset(i)) == elem + )) + return true; + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) + return false; + seq.next(); + assert(seq.index() <= capacity() && "full table!"); + } + return false; + } + + // TODO(alkis): Optimize this assuming *this and that don't overlap. + raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) + { + raw_hash_set tmp(std::move(that)); + swap(tmp); + return *this; + } + raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) + { + raw_hash_set tmp(std::move(that), alloc_ref()); + swap(tmp); + return *this; + } + + protected: + // Attempts to find `key` in the table; if it isn't found, returns a slot that + // the value can be inserted into, with the control byte already set to + // `key`'s H2. 
+ template + std::pair find_or_prepare_insert(const K& key) + { + prefetch_heap_block(); + auto hash = hash_ref()(key); + auto seq = probe(common(), hash); + const ctrl_t* ctrl = control(); + while (true) + { + Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) + { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slot_array() + seq.offset(i)) + ))) + return {seq.offset(i), false}; + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) + break; + seq.next(); + assert(seq.index() <= capacity() && "full table!"); + } + return {prepare_insert(hash), true}; + } + + // Given the hash of a value not currently in the table, finds the next + // viable slot index to insert it at. + // + // REQUIRES: At least one non-full slot available. + size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE + { + const bool rehash_for_bug_detection = + common().should_rehash_for_bug_detection_on_insert(); + if (rehash_for_bug_detection) + { + // Move to a different heap allocation in order to detect bugs. + const size_t cap = capacity(); + resize(growth_left() > 0 ? cap : NextCapacity(cap)); + } + auto target = find_first_non_full(common(), hash); + if (!rehash_for_bug_detection && + ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(control()[target.offset]))) + { + rehash_and_grow_if_necessary(); + target = find_first_non_full(common(), hash); + } + common().set_size(common().size() + 1); + set_growth_left(growth_left() - IsEmpty(control()[target.offset])); + SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); + common().maybe_increment_generation_on_insert(); + infoz().RecordInsert(hash, target.probe_length); + return target.offset; + } + + // Constructs the value in the space pointed by the iterator. This only works + // after an unsuccessful find_or_prepare_insert() and before any other + // modifications happen in the raw_hash_set. 
+ // + // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where + // k is the key decomposed from `forward(args)...`, and the bool + // returned by find_or_prepare_insert(k) was true. + // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). + template + void emplace_at(size_t i, Args&&... args) + { + PolicyTraits::construct(&alloc_ref(), slot_array() + i, std::forward(args)...); + + assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == iterator_at(i) && "constructed value does not match the lookup key"); + } + + iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return {control() + i, slot_array() + i, common().generation_ptr()}; + } + const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return {control() + i, slot_array() + i, common().generation_ptr()}; + } + + private: + friend struct RawHashSetTestOnlyAccess; + + // The number of slots we can still fill without needing to rehash. + // + // This is stored separately due to tombstones: we do not include tombstones + // in the growth capacity, because we'd like to rehash when the table is + // otherwise filled with tombstones: otherwise, probe sequences might get + // unacceptably long without triggering a rehash. Callers can also force a + // rehash via the standard `rehash(0)`, which will recompute this value as a + // side-effect. + // + // See `CapacityToGrowth()`. + size_t growth_left() const + { + return common().growth_left(); + } + void set_growth_left(size_t gl) + { + return common().set_growth_left(gl); + } + + // Prefetch the heap-allocated memory region to resolve potential TLB and + // cache misses. This is intended to overlap with execution of calculating the + // hash for a key. 
+ void prefetch_heap_block() const + { +#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + __builtin_prefetch(control(), 0, 1); +#endif + } + + CommonFields& common() + { + return settings_.template get<0>(); + } + const CommonFields& common() const + { + return settings_.template get<0>(); + } + + ctrl_t* control() const + { + return common().control(); + } + slot_type* slot_array() const + { + return static_cast(common().slot_array()); + } + HashtablezInfoHandle& infoz() + { + return common().infoz(); + } + + hasher& hash_ref() + { + return settings_.template get<1>(); + } + const hasher& hash_ref() const + { + return settings_.template get<1>(); + } + key_equal& eq_ref() + { + return settings_.template get<2>(); + } + const key_equal& eq_ref() const + { + return settings_.template get<2>(); + } + allocator_type& alloc_ref() + { + return settings_.template get<3>(); + } + const allocator_type& alloc_ref() const + { + return settings_.template get<3>(); + } + + // Make type-specific functions for this type's PolicyFunctions struct. + static size_t hash_slot_fn(void* set, void* slot) + { + auto* h = static_cast(set); + return PolicyTraits::apply( + HashElement{h->hash_ref()}, + PolicyTraits::element(static_cast(slot)) + ); + } + static void transfer_slot_fn(void* set, void* dst, void* src) + { + auto* h = static_cast(set); + PolicyTraits::transfer(&h->alloc_ref(), static_cast(dst), static_cast(src)); + } + // Note: dealloc_fn will only be used if we have a non-standard allocator. + static void dealloc_fn(CommonFields& common, const PolicyFunctions&) + { + auto* set = reinterpret_cast(&common); + + // Unpoison before returning the memory to the allocator. 
+ SanitizerUnpoisonMemoryRegion(common.slot_array(), sizeof(slot_type) * common.capacity()); + + Deallocate( + &set->alloc_ref(), common.backing_array_start(), common.alloc_size(sizeof(slot_type), alignof(slot_type)) + ); + } + + static const PolicyFunctions& GetPolicyFunctions() + { + static constexpr PolicyFunctions value = { + sizeof(slot_type), + &raw_hash_set::hash_slot_fn, + PolicyTraits::transfer_uses_memcpy() ? TransferRelocatable : &raw_hash_set::transfer_slot_fn, + (std::is_same>::value ? &DeallocateStandard : &raw_hash_set::dealloc_fn), + }; + return value; + } + + // Bundle together CommonFields plus other objects which might be empty. + // CompressedTuple will ensure that sizeof is not affected by any of the empty + // fields that occur after CommonFields. + absl::container_internal::CompressedTuple + settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}}; + }; + + // Erases all elements that satisfy the predicate `pred` from the container `c`. + template + typename raw_hash_set::size_type EraseIf( + Predicate& pred, raw_hash_set* c + ) + { + const auto initial_size = c->size(); + for (auto it = c->begin(), last = c->end(); it != last;) + { + if (pred(*it)) + { + c->erase(it++); + } + else + { + ++it; + } + } + return initial_size - c->size(); + } + + namespace hashtable_debug_internal + { + template + struct HashtableDebugAccess> + { + using Traits = typename Set::PolicyTraits; + using Slot = typename Traits::slot_type; + + static size_t GetNumProbes(const Set& set, const typename Set::key_type& key) + { + size_t num_probes = 0; + size_t hash = set.hash_ref()(key); + auto seq = probe(set.common(), hash); + const ctrl_t* ctrl = set.control(); + while (true) + { + container_internal::Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(container_internal::H2(hash))) + { + if (Traits::apply( + typename Set::template EqualElement{ + key, set.eq_ref()}, + Traits::element(set.slot_array() + seq.offset(i)) + )) + return num_probes; + 
++num_probes; + } + if (g.MaskEmpty()) + return num_probes; + seq.next(); + ++num_probes; + } + } + + static size_t AllocatedByteSize(const Set& c) + { + size_t capacity = c.capacity(); + if (capacity == 0) + return 0; + size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); + + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) + { + m += per_slot * c.size(); + } + else + { + const ctrl_t* ctrl = c.control(); + for (size_t i = 0; i != capacity; ++i) + { + if (container_internal::IsFull(ctrl[i])) + { + m += Traits::space_used(c.slot_array() + i); + } + } + } + return m; + } + + static size_t LowerBoundAllocatedByteSize(size_t size) + { + size_t capacity = GrowthToLowerboundCapacity(size); + if (capacity == 0) + return 0; + size_t m = + AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) + { + m += per_slot * size; + } + return m; + } + }; + + } // namespace hashtable_debug_internal + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h b/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h new file mode 100644 index 00000000..46f3b17b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h @@ -0,0 +1,340 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_ +#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_ + +#include +#include + +#include "absl/types/compare.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace test_internal + { + + // A type that counts number of occurrences of the type, the live occurrences of + // the type, as well as the number of copies, moves, swaps, and comparisons that + // have occurred on the type. This is used as a base class for the copyable, + // copyable+movable, and movable types below that are used in actual tests. Use + // InstanceTracker in tests to track the number of instances. 
+ class BaseCountedInstance + { + public: + explicit BaseCountedInstance(int x) : + value_(x) + { + ++num_instances_; + ++num_live_instances_; + } + BaseCountedInstance(const BaseCountedInstance& x) : + value_(x.value_), + is_live_(x.is_live_) + { + ++num_instances_; + if (is_live_) + ++num_live_instances_; + ++num_copies_; + } + BaseCountedInstance(BaseCountedInstance&& x) : + value_(x.value_), + is_live_(x.is_live_) + { + x.is_live_ = false; + ++num_instances_; + ++num_moves_; + } + ~BaseCountedInstance() + { + --num_instances_; + if (is_live_) + --num_live_instances_; + } + + BaseCountedInstance& operator=(const BaseCountedInstance& x) + { + value_ = x.value_; + if (is_live_) + --num_live_instances_; + is_live_ = x.is_live_; + if (is_live_) + ++num_live_instances_; + ++num_copies_; + return *this; + } + BaseCountedInstance& operator=(BaseCountedInstance&& x) + { + value_ = x.value_; + if (is_live_) + --num_live_instances_; + is_live_ = x.is_live_; + x.is_live_ = false; + ++num_moves_; + return *this; + } + + bool operator==(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ == x.value_; + } + + bool operator!=(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ != x.value_; + } + + bool operator<(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ < x.value_; + } + + bool operator>(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ > x.value_; + } + + bool operator<=(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ <= x.value_; + } + + bool operator>=(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ >= x.value_; + } + + absl::weak_ordering compare(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ < x.value_ ? absl::weak_ordering::less : value_ == x.value_ ? 
absl::weak_ordering::equivalent : + absl::weak_ordering::greater; + } + + int value() const + { + if (!is_live_) + std::abort(); + return value_; + } + + friend std::ostream& operator<<(std::ostream& o, const BaseCountedInstance& v) + { + return o << "[value:" << v.value() << "]"; + } + + // Implementation of efficient swap() that counts swaps. + static void SwapImpl( + BaseCountedInstance& lhs, // NOLINT(runtime/references) + BaseCountedInstance& rhs + ) + { // NOLINT(runtime/references) + using std::swap; + swap(lhs.value_, rhs.value_); + swap(lhs.is_live_, rhs.is_live_); + ++BaseCountedInstance::num_swaps_; + } + + private: + friend class InstanceTracker; + + int value_; + + // Indicates if the value is live, ie it hasn't been moved away from. + bool is_live_ = true; + + // Number of instances. + static int num_instances_; + + // Number of live instances (those that have not been moved away from.) + static int num_live_instances_; + + // Number of times that BaseCountedInstance objects were moved. + static int num_moves_; + + // Number of times that BaseCountedInstance objects were copied. + static int num_copies_; + + // Number of times that BaseCountedInstance objects were swapped. + static int num_swaps_; + + // Number of times that BaseCountedInstance objects were compared. + static int num_comparisons_; + }; + + // Helper to track the BaseCountedInstance instance counters. Expects that the + // number of instances and live_instances are the same when it is constructed + // and when it is destructed. 
+ class InstanceTracker + { + public: + InstanceTracker() : + start_instances_(BaseCountedInstance::num_instances_), + start_live_instances_(BaseCountedInstance::num_live_instances_) + { + ResetCopiesMovesSwaps(); + } + ~InstanceTracker() + { + if (instances() != 0) + std::abort(); + if (live_instances() != 0) + std::abort(); + } + + // Returns the number of BaseCountedInstance instances both containing valid + // values and those moved away from compared to when the InstanceTracker was + // constructed + int instances() const + { + return BaseCountedInstance::num_instances_ - start_instances_; + } + + // Returns the number of live BaseCountedInstance instances compared to when + // the InstanceTracker was constructed + int live_instances() const + { + return BaseCountedInstance::num_live_instances_ - start_live_instances_; + } + + // Returns the number of moves on BaseCountedInstance objects since + // construction or since the last call to ResetCopiesMovesSwaps(). + int moves() const + { + return BaseCountedInstance::num_moves_ - start_moves_; + } + + // Returns the number of copies on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int copies() const + { + return BaseCountedInstance::num_copies_ - start_copies_; + } + + // Returns the number of swaps on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int swaps() const + { + return BaseCountedInstance::num_swaps_ - start_swaps_; + } + + // Returns the number of comparisons on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int comparisons() const + { + return BaseCountedInstance::num_comparisons_ - start_comparisons_; + } + + // Resets the base values for moves, copies, comparisons, and swaps to the + // current values, so that subsequent Get*() calls for moves, copies, + // comparisons, and swaps will compare to the situation at the point of this + // call. 
+ void ResetCopiesMovesSwaps() + { + start_moves_ = BaseCountedInstance::num_moves_; + start_copies_ = BaseCountedInstance::num_copies_; + start_swaps_ = BaseCountedInstance::num_swaps_; + start_comparisons_ = BaseCountedInstance::num_comparisons_; + } + + private: + int start_instances_; + int start_live_instances_; + int start_moves_; + int start_copies_; + int start_swaps_; + int start_comparisons_; + }; + + // Copyable, not movable. + class CopyableOnlyInstance : public BaseCountedInstance + { + public: + explicit CopyableOnlyInstance(int x) : + BaseCountedInstance(x) + { + } + CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default; + CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default; + + friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) + { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() + { + return false; + } + }; + + // Copyable and movable. + class CopyableMovableInstance : public BaseCountedInstance + { + public: + explicit CopyableMovableInstance(int x) : + BaseCountedInstance(x) + { + } + CopyableMovableInstance(const CopyableMovableInstance& rhs) = default; + CopyableMovableInstance(CopyableMovableInstance&& rhs) = default; + CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) = + default; + CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default; + + friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) + { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() + { + return true; + } + }; + + // Only movable, not default-constructible. 
+ class MovableOnlyInstance : public BaseCountedInstance + { + public: + explicit MovableOnlyInstance(int x) : + BaseCountedInstance(x) + { + } + MovableOnlyInstance(MovableOnlyInstance&& other) = default; + MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default; + + friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) + { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() + { + return true; + } + }; + + } // namespace test_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/tracked.h b/CAPI/cpp/grpc/include/absl/container/internal/tracked.h new file mode 100644 index 00000000..84c9bd18 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/tracked.h @@ -0,0 +1,106 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_ +#define ABSL_CONTAINER_INTERNAL_TRACKED_H_ + +#include + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // A class that tracks its copies and moves so that it can be queried in tests. 
+ template + class Tracked + { + public: + Tracked() + { + } + // NOLINTNEXTLINE(runtime/explicit) + Tracked(const T& val) : + val_(val) + { + } + Tracked(const Tracked& that) : + val_(that.val_), + num_moves_(that.num_moves_), + num_copies_(that.num_copies_) + { + ++(*num_copies_); + } + Tracked(Tracked&& that) : + val_(std::move(that.val_)), + num_moves_(std::move(that.num_moves_)), + num_copies_(std::move(that.num_copies_)) + { + ++(*num_moves_); + } + Tracked& operator=(const Tracked& that) + { + val_ = that.val_; + num_moves_ = that.num_moves_; + num_copies_ = that.num_copies_; + ++(*num_copies_); + } + Tracked& operator=(Tracked&& that) + { + val_ = std::move(that.val_); + num_moves_ = std::move(that.num_moves_); + num_copies_ = std::move(that.num_copies_); + ++(*num_moves_); + } + + const T& val() const + { + return val_; + } + + friend bool operator==(const Tracked& a, const Tracked& b) + { + return a.val_ == b.val_; + } + friend bool operator!=(const Tracked& a, const Tracked& b) + { + return !(a == b); + } + + size_t num_copies() + { + return *num_copies_; + } + size_t num_moves() + { + return *num_moves_; + } + + private: + T val_; + std::shared_ptr num_moves_ = std::make_shared(0); + std::shared_ptr num_copies_ = std::make_shared(0); + }; + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h new file mode 100644 index 00000000..00248e19 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h @@ -0,0 +1,546 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ConstructorTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ConstructorTest); + + TYPED_TEST_P(ConstructorTest, NoArgs) + { + TypeParam m; + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, BucketCount) + { + TypeParam m(123); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHash) + { + using H = typename TypeParam::hasher; + H hasher; + TypeParam m(123, hasher); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + H hasher; + E equal; + TypeParam m(123, hasher, equal); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + 
TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + template + struct is_std_unordered_map : std::false_type + { + }; + + template + struct is_std_unordered_map> : std::true_type + { + }; + +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) + using has_cxx14_std_apis = std::true_type; +#else + using has_cxx14_std_apis = std::false_type; +#endif + + template + using expect_cxx14_apis = + absl::disjunction>, has_cxx14_std_apis>; + + template + void BucketCountAllocTest(std::false_type) + { + } + + template + void BucketCountAllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountAlloc) + { + BucketCountAllocTest(expect_cxx14_apis()); + } + + template + void BucketCountHashAllocTest(std::false_type) + { + } + + template + void BucketCountHashAllocTest(std::true_type) + { + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + TypeParam m(123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) + { + BucketCountHashAllocTest(expect_cxx14_apis()); + } + +#if 
ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS + using has_alloc_std_constructors = std::true_type; +#else + using has_alloc_std_constructors = std::false_type; +#endif + + template + using expect_alloc_constructors = + absl::disjunction>, has_alloc_std_constructors>; + + template + void AllocTest(std::false_type) + { + } + + template + void AllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, Alloc) + { + AllocTest(expect_alloc_constructors()); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InputIteratorBucketAllocTest(std::false_type) + { + } + + template + void InputIteratorBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) + { + 
InputIteratorBucketAllocTest(expect_cxx14_apis()); + } + + template + void InputIteratorBucketHashAllocTest(std::false_type) + { + } + + template + void InputIteratorBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) + { + InputIteratorBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, CopyConstructor) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + template + void CopyConstructorAllocTest(std::false_type) + { + } + + template + void CopyConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + 
EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) + { + CopyConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on copy constructors. + + TYPED_TEST_P(ConstructorTest, MoveConstructor) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + template + void MoveConstructorAllocTest(std::false_type) + { + } + + template + void MoveConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) + { + MoveConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on move constructors. 
+ + TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InitializerListBucketAllocTest(std::false_type) + { + } + + template + void InitializerListBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) + { + InitializerListBucketAllocTest(expect_cxx14_apis()); + } + + template + void InitializerListBucketHashAllocTest(std::false_type) + { + } + + template + void InitializerListBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + 
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) + { + InitializerListBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, Assignment) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam n; + n = m; + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); + } + + // TODO(alkis): Test [non-]propagating allocators on move/copy assignments + // (it depends on traits). + + TYPED_TEST_P(ConstructorTest, MoveAssignment) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam t(m); + TypeParam n; + n = std::move(t); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) + { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam n({gen()}); + n = m; + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + TypeParam 
m({gen(), gen(), gen()}); + TypeParam t(m); + TypeParam n({gen()}); + n = std::move(t); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) + { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values); + m = *&m; // Avoid -Wself-assign + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + } + + // We cannot test self move as standard states that it leaves standard + // containers in unspecified state (and in practice in causes memory-leak + // according to heap-checker!). + + REGISTER_TYPED_TEST_SUITE_P( + ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment, MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting, AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf + ); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h new file mode 100644 index 00000000..17241b84 --- /dev/null +++ 
b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h @@ -0,0 +1,125 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class LookupTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(LookupTest); + + TYPED_TEST_P(LookupTest, At) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + for (const auto& p : values) + { + const auto& val = m.at(p.first); + EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first); + } + } + + TYPED_TEST_P(LookupTest, OperatorBracket) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + { + auto& val = m[p.first]; + EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first); + val = p.second; + } + for (const auto& p : values) + EXPECT_EQ(p.second, 
m[p.first]) << ::testing::PrintToString(p.first); + } + + TYPED_TEST_P(LookupTest, Count) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first); + m.insert(values.begin(), values.end()); + for (const auto& p : values) + EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first); + } + + TYPED_TEST_P(LookupTest, Find) + { + using std::get; + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + EXPECT_TRUE(m.end() == m.find(p.first)) + << ::testing::PrintToString(p.first); + m.insert(values.begin(), values.end()); + for (const auto& p : values) + { + auto it = m.find(p.first); + EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first); + EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first); + } + } + + TYPED_TEST_P(LookupTest, EqualRange) + { + using std::get; + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + { + auto r = m.equal_range(p.first); + ASSERT_EQ(0, std::distance(r.first, r.second)); + } + m.insert(values.begin(), values.end()); + for (const auto& p : values) + { + auto r = m.equal_range(p.first); + ASSERT_EQ(1, std::distance(r.first, r.second)); + EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first); + } + } + + REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find, EqualRange); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ diff --git 
a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h new file mode 100644 index 00000000..631606d3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h @@ -0,0 +1,90 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_ + +#include +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class MembersTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(MembersTest); + + template + void UseType() + { + } + + TYPED_TEST_P(MembersTest, Typedefs) + { + EXPECT_TRUE((std::is_same, typename TypeParam::value_type>())); + EXPECT_TRUE((absl::conjunction< + absl::negation>, + std::is_integral>())); + EXPECT_TRUE((absl::conjunction< + std::is_signed, + std::is_integral>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval() + )), + size_t>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval(), + std::declval() + )), + bool>())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same::pointer, typename 
TypeParam::pointer>())); + EXPECT_TRUE( + (std::is_same::const_pointer, typename TypeParam::const_pointer>()) + ); + } + + TYPED_TEST_P(MembersTest, SimpleFunctions) + { + EXPECT_GT(TypeParam().max_size(), 0); + } + + TYPED_TEST_P(MembersTest, BeginEnd) + { + TypeParam t = {typename TypeParam::value_type{}}; + EXPECT_EQ(t.begin(), t.cbegin()); + EXPECT_EQ(t.end(), t.cend()); + EXPECT_NE(t.begin(), t.end()); + EXPECT_NE(t.cbegin(), t.cend()); + } + + REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h new file mode 100644 index 00000000..fea5d502 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h @@ -0,0 +1,370 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ModifiersTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ModifiersTest); + + TYPED_TEST_P(ModifiersTest, Clear) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + m.clear(); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(m.empty()); + } + + TYPED_TEST_P(ModifiersTest, Insert) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.insert(val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); + } + + TYPED_TEST_P(ModifiersTest, InsertHint) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert(m.end(), val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.insert(it, val2); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + } + + TYPED_TEST_P(ModifiersTest, InsertRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + m.insert(values.begin(), values.end()); + ASSERT_THAT(items(m), 
::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + T val2 = {val.first, hash_internal::Generator()()}; + m.insert(val2); + EXPECT_EQ(m.bucket_count(), original_capacity); + } + + TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) + { +#if !defined(__GLIBCXX__) + using T = hash_internal::GeneratedType; + std::vector base_values; + std::generate_n(std::back_inserter(base_values), 10, hash_internal::Generator()); + std::vector values; + while (values.size() != 100) + { + std::copy_n(base_values.begin(), 10, std::back_inserter(values)); + } + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(values.begin(), values.end()); + EXPECT_EQ(m.bucket_count(), original_capacity); +#endif + } + + TYPED_TEST_P(ModifiersTest, InsertOrAssign) + { +#ifdef UNORDERED_MAP_CXX17 + using std::get; + using K = typename TypeParam::key_type; + using V = typename TypeParam::mapped_type; + K k = hash_internal::Generator()(); + V val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert_or_assign(k, val); + EXPECT_TRUE(p.second); + EXPECT_EQ(k, get<0>(*p.first)); + EXPECT_EQ(val, get<1>(*p.first)); + V val2 = hash_internal::Generator()(); + p = m.insert_or_assign(k, val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(k, get<0>(*p.first)); + EXPECT_EQ(val2, get<1>(*p.first)); +#endif + } + + TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) + { +#ifdef UNORDERED_MAP_CXX17 + using std::get; + using K = typename TypeParam::key_type; + using V = typename TypeParam::mapped_type; + K k = hash_internal::Generator()(); + V val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert_or_assign(m.end(), k, 
val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(k, get<0>(*it)); + EXPECT_EQ(val, get<1>(*it)); + V val2 = hash_internal::Generator()(); + it = m.insert_or_assign(it, k, val2); + EXPECT_EQ(k, get<0>(*it)); + EXPECT_EQ(val2, get<1>(*it)); +#endif + } + + TYPED_TEST_P(ModifiersTest, Emplace) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto p = m.emplace(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.emplace(val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); + } + + TYPED_TEST_P(ModifiersTest, EmplaceHint) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.emplace_hint(m.end(), val); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.emplace_hint(it, val2); + EXPECT_EQ(val, *it); + } + + TYPED_TEST_P(ModifiersTest, TryEmplace) + { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. 
+ auto p = m.try_emplace(val.first, val.second); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.try_emplace(val2.first, val2.second); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); +#endif + } + + TYPED_TEST_P(ModifiersTest, TryEmplaceHint) + { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.try_emplace(m.end(), val.first, val.second); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.try_emplace(it, val2.first, val2.second); + EXPECT_EQ(val, *it); +#endif + } + + template + using IfNotVoid = typename std::enable_if::value, V>::type; + + // In openmap we chose not to return the iterator from erase because that's + // more expensive. As such we adapt erase to return an iterator here. + struct EraseFirst + { + template + auto operator()(Map* m, int) const + -> IfNotVoiderase(m->begin()))> + { + return m->erase(m->begin()); + } + template + typename Map::iterator operator()(Map* m, ...) 
const + { + auto it = m->begin(); + m->erase(it++); + return it; + } + }; + + TYPED_TEST_P(ModifiersTest, Erase) + { + using T = hash_internal::GeneratedType; + using std::get; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + auto& first = *m.begin(); + std::vector values2; + for (const auto& val : values) + if (get<0>(val) != get<0>(first)) + values2.push_back(val); + auto it = EraseFirst()(&m, 0); + ASSERT_TRUE(it != m.end()); + EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(), values2.end())); + } + + TYPED_TEST_P(ModifiersTest, EraseRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + auto it = m.erase(m.begin(), m.end()); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(it == m.end()); + } + + TYPED_TEST_P(ModifiersTest, EraseKey) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_EQ(1, m.erase(values[0].first)); + EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, values.end())); + } + + TYPED_TEST_P(ModifiersTest, Swap) + { + using T = hash_internal::GeneratedType; + std::vector v1; + std::vector v2; + std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator()); + std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator()); + TypeParam 
m1(v1.begin(), v1.end()); + TypeParam m2(v2.begin(), v2.end()); + EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1)); + EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2)); + m1.swap(m2); + EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2)); + EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1)); + } + + // TODO(alkis): Write tests for extract. + // TODO(alkis): Write tests for merge. + + REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, InsertRange, InsertWithinCapacity, InsertRangeWithinCapacity, InsertOrAssign, InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace, TryEmplaceHint, Erase, EraseRange, EraseKey, Swap); + + template + struct is_unique_ptr : std::false_type + { + }; + + template + struct is_unique_ptr> : std::true_type + { + }; + + template + class UniquePtrModifiersTest : public ::testing::Test + { + protected: + UniquePtrModifiersTest() + { + static_assert(is_unique_ptr::value, "UniquePtrModifiersTyest may only be called with a " + "std::unique_ptr value type."); + } + }; + + GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest); + + TYPED_TEST_SUITE_P(UniquePtrModifiersTest); + + // Test that we do not move from rvalue arguments if an insertion does not + // happen. + TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) + { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.try_emplace(val.first, std::move(val.second)); + EXPECT_TRUE(p.second); + // A moved from std::unique_ptr is guaranteed to be nullptr. 
+ EXPECT_EQ(val.second, nullptr); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.try_emplace(val2.first, std::move(val2.second)); + EXPECT_FALSE(p.second); + EXPECT_NE(val2.second, nullptr); +#endif + } + + REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h new file mode 100644 index 00000000..44969960 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h @@ -0,0 +1,551 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_ + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ConstructorTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ConstructorTest); + + TYPED_TEST_P(ConstructorTest, NoArgs) + { + TypeParam m; + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, BucketCount) + { + TypeParam m(123); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHash) + { + using H = typename TypeParam::hasher; + H hasher; + TypeParam m(123, hasher); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + H hasher; + E equal; + TypeParam m(123, hasher, equal); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + 
EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + + const auto& cm = m; + EXPECT_EQ(cm.hash_function(), hasher); + EXPECT_EQ(cm.key_eq(), equal); + EXPECT_EQ(cm.get_allocator(), alloc); + EXPECT_TRUE(cm.empty()); + EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre()); + EXPECT_GE(cm.bucket_count(), 123); + } + + template + struct is_std_unordered_set : std::false_type + { + }; + + template + struct is_std_unordered_set> : std::true_type + { + }; + +#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17) + using has_cxx14_std_apis = std::true_type; +#else + using has_cxx14_std_apis = std::false_type; +#endif + + template + using expect_cxx14_apis = + absl::disjunction>, has_cxx14_std_apis>; + + template + void BucketCountAllocTest(std::false_type) + { + } + + template + void BucketCountAllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountAlloc) + { + BucketCountAllocTest(expect_cxx14_apis()); + } + + template + void BucketCountHashAllocTest(std::false_type) + { + } + + template + void BucketCountHashAllocTest(std::true_type) + { + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + TypeParam m(123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) + { + BucketCountHashAllocTest(expect_cxx14_apis()); + } + +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS + using has_alloc_std_constructors = std::true_type; +#else + using 
has_alloc_std_constructors = std::false_type; +#endif + + template + using expect_alloc_constructors = + absl::disjunction>, has_alloc_std_constructors>; + + template + void AllocTest(std::false_type) + { + } + + template + void AllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, Alloc) + { + AllocTest(expect_alloc_constructors()); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InputIteratorBucketAllocTest(std::false_type) + { + } + + template + void InputIteratorBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) + { + InputIteratorBucketAllocTest(expect_cxx14_apis()); + } + + template + void InputIteratorBucketHashAllocTest(std::false_type) + { 
+ } + + template + void InputIteratorBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) + { + InputIteratorBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, CopyConstructor) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + EXPECT_NE(TypeParam(0, hasher, equal, alloc), n); + } + + template + void CopyConstructorAllocTest(std::false_type) + { + } + + template + void CopyConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + 
TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) + { + CopyConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on copy constructors. + + TYPED_TEST_P(ConstructorTest, MoveConstructor) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + template + void MoveConstructorAllocTest(std::false_type) + { + } + + template + void MoveConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) + { + MoveConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on move constructors. 
+ + TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InitializerListBucketAllocTest(std::false_type) + { + } + + template + void InitializerListBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) + { + InitializerListBucketAllocTest(expect_cxx14_apis()); + } + + template + void InitializerListBucketHashAllocTest(std::false_type) + { + } + + template + void InitializerListBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, 
InitializerListBucketHashAlloc) + { + InitializerListBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, CopyAssignment) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam n; + n = m; + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); + } + + // TODO(alkis): Test [non-]propagating allocators on move/copy assignments + // (it depends on traits). + + TYPED_TEST_P(ConstructorTest, MoveAssignment) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam t(m); + TypeParam n; + n = std::move(t); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam n({gen()}); + n = m; + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam t(m); + TypeParam 
n({gen()}); + n = std::move(t); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values); + m = *&m; // Avoid -Wself-assign. + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + REGISTER_TYPED_TEST_SUITE_P( + ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment, MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting, AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf + ); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h new file mode 100644 index 00000000..7775f6dd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h @@ -0,0 +1,94 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_ + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class LookupTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(LookupTest); + + TYPED_TEST_P(LookupTest, Count) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v); + m.insert(values.begin(), values.end()); + for (const auto& v : values) + EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v); + } + + TYPED_TEST_P(LookupTest, Find) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v); + m.insert(values.begin(), values.end()); + for (const auto& v : values) + { + typename TypeParam::iterator it = m.find(v); + static_assert(std::is_same::value, ""); + static_assert(std::is_same())>::value, ""); + 
EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v); + EXPECT_EQ(v, *it) << ::testing::PrintToString(v); + } + } + + TYPED_TEST_P(LookupTest, EqualRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + { + auto r = m.equal_range(v); + ASSERT_EQ(0, std::distance(r.first, r.second)); + } + m.insert(values.begin(), values.end()); + for (const auto& v : values) + { + auto r = m.equal_range(v); + ASSERT_EQ(1, std::distance(r.first, r.second)); + EXPECT_EQ(v, *r.first); + } + } + + REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h new file mode 100644 index 00000000..ccc772f1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h @@ -0,0 +1,90 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_ + +#include +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class MembersTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(MembersTest); + + template + void UseType() + { + } + + TYPED_TEST_P(MembersTest, Typedefs) + { + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((absl::conjunction< + absl::negation>, + std::is_integral>())); + EXPECT_TRUE((absl::conjunction< + std::is_signed, + std::is_integral>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval() + )), + size_t>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval(), + std::declval() + )), + bool>())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same::pointer, typename TypeParam::pointer>())); + EXPECT_TRUE( + (std::is_same::const_pointer, typename TypeParam::const_pointer>()) + ); + } + + TYPED_TEST_P(MembersTest, SimpleFunctions) + { + EXPECT_GT(TypeParam().max_size(), 0); + } + + TYPED_TEST_P(MembersTest, BeginEnd) + { + TypeParam t = {typename TypeParam::value_type{}}; + EXPECT_EQ(t.begin(), t.cbegin()); + EXPECT_EQ(t.end(), t.cend()); + EXPECT_NE(t.begin(), t.end()); + EXPECT_NE(t.cbegin(), t.cend()); + } + + REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h new file mode 100644 index 00000000..92137f4f --- /dev/null +++ 
b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h @@ -0,0 +1,231 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ModifiersTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ModifiersTest); + + TYPED_TEST_P(ModifiersTest, Clear) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + m.clear(); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(m.empty()); + } + + TYPED_TEST_P(ModifiersTest, Insert) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + p = m.insert(val); + EXPECT_FALSE(p.second); + } + + TYPED_TEST_P(ModifiersTest, InsertHint) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); 
+ TypeParam m; + auto it = m.insert(m.end(), val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + it = m.insert(it, val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + } + + TYPED_TEST_P(ModifiersTest, InsertRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + m.insert(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + } + + TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) + { +#if !defined(__GLIBCXX__) + using T = hash_internal::GeneratedType; + std::vector base_values; + std::generate_n(std::back_inserter(base_values), 10, hash_internal::Generator()); + std::vector values; + while (values.size() != 100) + { + values.insert(values.end(), base_values.begin(), base_values.end()); + } + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(values.begin(), values.end()); + EXPECT_EQ(m.bucket_count(), original_capacity); +#endif + } + + TYPED_TEST_P(ModifiersTest, Emplace) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. 
+ auto p = m.emplace(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + p = m.emplace(val); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); + } + + TYPED_TEST_P(ModifiersTest, EmplaceHint) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.emplace_hint(m.end(), val); + EXPECT_EQ(val, *it); + it = m.emplace_hint(it, val); + EXPECT_EQ(val, *it); + } + + template + using IfNotVoid = typename std::enable_if::value, V>::type; + + // In openmap we chose not to return the iterator from erase because that's + // more expensive. As such we adapt erase to return an iterator here. + struct EraseFirst + { + template + auto operator()(Map* m, int) const + -> IfNotVoiderase(m->begin()))> + { + return m->erase(m->begin()); + } + template + typename Map::iterator operator()(Map* m, ...) const + { + auto it = m->begin(); + m->erase(it++); + return it; + } + }; + + TYPED_TEST_P(ModifiersTest, Erase) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + std::vector values2; + for (const auto& val : values) + if (val != *m.begin()) + values2.push_back(val); + auto it = EraseFirst()(&m, 0); + ASSERT_TRUE(it != m.end()); + EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(), values2.end())); + } + + TYPED_TEST_P(ModifiersTest, EraseRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + auto it = 
m.erase(m.begin(), m.end()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(it == m.end()); + } + + TYPED_TEST_P(ModifiersTest, EraseKey) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_EQ(1, m.erase(values[0])); + EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, values.end())); + } + + TYPED_TEST_P(ModifiersTest, Swap) + { + using T = hash_internal::GeneratedType; + std::vector v1; + std::vector v2; + std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator()); + std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator()); + TypeParam m1(v1.begin(), v1.end()); + TypeParam m2(v2.begin(), v2.end()); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1)); + EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2)); + m1.swap(m2); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2)); + EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1)); + } + + // TODO(alkis): Write tests for extract. + // TODO(alkis): Write tests for merge. + + REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, InsertRange, InsertWithinCapacity, InsertRangeWithinCapacity, Emplace, EmplaceHint, Erase, EraseRange, EraseKey, Swap); + + } // namespace container_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/node_hash_map.h b/CAPI/cpp/grpc/include/absl/container/node_hash_map.h new file mode 100644 index 00000000..cff4dc18 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/node_hash_map.h @@ -0,0 +1,620 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: node_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::node_hash_map` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. However, +// `node_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. However, if you need pointer stability and cannot store +// a `flat_hash_map` with `unique_ptr` elements, a `node_hash_map` may be a +// valid alternative. As well, if you are migrating your code from using +// `std::unordered_map`, a `node_hash_map` provides a more straightforward +// migration, because it guarantees pointer stability. Consider migrating to +// `node_hash_map` and perhaps converting to a more efficient `flat_hash_map` +// upon further review. 
+ +#ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_ +#define ABSL_CONTAINER_NODE_HASH_MAP_H_ + +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/node_slot_policy.h" +#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + class NodeHashMapPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::node_hash_map + // ----------------------------------------------------------------------------- + // + // An `absl::node_hash_map` is an unordered associative container which + // has been optimized for both speed and memory footprint in most common use + // cases. Its interface is similar to that of `std::unordered_map` with + // the following notable differences: + // + // * Supports heterogeneous lookup, through `find()`, `operator[]()` and + // `insert()`, provided that the map is provided a compatible heterogeneous + // hashing function and equality operator. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash map. + // * Returns `void` from the `erase(iterator)` overload. + // + // By default, `node_hash_map` uses the `absl::Hash` hashing framework. + // All fundamental and Abseil types that support the `absl::Hash` framework have + // a compatible equality operator for comparing insertions into `node_hash_map`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. 
+ // + // Using `absl::node_hash_map` at interface boundaries in dynamically loaded + // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. + // + // Example: + // + // // Create a node hash map of three strings (that map to strings) + // absl::node_hash_map ducks = + // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; + // + // // Insert a new element into the node hash map + // ducks.insert({"d", "donald"}}; + // + // // Force a rehash of the node hash map + // ducks.rehash(0); + // + // // Find the element with the key "b" + // std::string search_key = "b"; + // auto result = ducks.find(search_key); + // if (result != ducks.end()) { + // std::cout << "Result: " << result->second << std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Alloc = std::allocator>> + class node_hash_map : public absl::container_internal::raw_hash_map, Hash, Eq, Alloc> + { + using Base = typename node_hash_map::raw_hash_map; + + public: + // Constructors and Assignment Operators + // + // A node_hash_map supports the same overload set as `std::unordered_map` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. 
+ // absl::node_hash_map map1; + // + // * Initializer List constructor + // + // absl::node_hash_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::node_hash_map map3(map2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::node_hash_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::node_hash_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::node_hash_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::node_hash_map map7(v.begin(), v.end()); + node_hash_map() + { + } + using Base::Base; + + // node_hash_map::begin() + // + // Returns an iterator to the beginning of the `node_hash_map`. + using Base::begin; + + // node_hash_map::cbegin() + // + // Returns a const iterator to the beginning of the `node_hash_map`. + using Base::cbegin; + + // node_hash_map::cend() + // + // Returns a const iterator to the end of the `node_hash_map`. + using Base::cend; + + // node_hash_map::end() + // + // Returns an iterator to the end of the `node_hash_map`. + using Base::end; + + // node_hash_map::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `node_hash_map`. + // + // NOTE: this member function is particular to `absl::node_hash_map` and is + // not provided in the `std::unordered_map` API. + using Base::capacity; + + // node_hash_map::empty() + // + // Returns whether or not the `node_hash_map` is empty. + using Base::empty; + + // node_hash_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `node_hash_map` under current memory constraints. 
This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `node_hash_map`. + using Base::max_size; + + // node_hash_map::size() + // + // Returns the number of elements currently within the `node_hash_map`. + using Base::size; + + // node_hash_map::clear() + // + // Removes all elements from the `node_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // node_hash_map::erase() + // + // Erases elements within the `node_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `node_hash_map`, returning + // `void`. + // + // NOTE: this return behavior is different than that of STL containers in + // general and `std::unordered_map` in particular. + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. The special case of calling + // `erase(begin(), end())` resets the reserved growth such that if + // `reserve(N)` has previously been called and there has been no intervening + // call to `clear()`, then after calling `erase(begin(), end())`, it is safe + // to assume that inserting N elements will not cause a rehash. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // node_hash_map::insert() + // + // Inserts an element of the specified value into the `node_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. 
If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const init_type& value): + // + // Inserts a value into the `node_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a `bool` denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // std::pair insert(init_type&& value): + // + // Inserts a moveable value into the `node_hash_map`. Returns a `std::pair` + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a `bool` denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `node_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `node_hash_map` we guarantee the first match is inserted. 
+ using Base::insert; + + // node_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `node_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due to + // the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert_or_assign(const init_type& k, T&& obj): + // std::pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // node_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. 
+ using Base::emplace; + + // node_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // node_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // std::pair try_emplace(const key_type& k, Args&&... args): + // std::pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `node_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `node_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. 
+ // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // node_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `node_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + using Base::extract; + + // node_hash_map::merge() + // + // Extracts elements from a given `source` node hash map into this + // `node_hash_map`. If the destination `node_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // node_hash_map::swap(node_hash_map& other) + // + // Exchanges the contents of this `node_hash_map` with those of the `other` + // node hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `node_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. 
+ // + // `swap()` requires that the node hash map's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // node_hash_map::rehash(count) + // + // Rehashes the `node_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + using Base::rehash; + + // node_hash_map::reserve(count) + // + // Sets the number of slots in the `node_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // node_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // node_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `node_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // node_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `node_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `node_hash_map`. 
+ using Base::count; + + // node_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `node_hash_map`. + using Base::equal_range; + + // node_hash_map::find() + // + // Finds an element with the passed `key` within the `node_hash_map`. + using Base::find; + + // node_hash_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `node_hash_map`, performing an `insert()` if the key does not already + // exist. If an insertion occurs and results in a rehashing of the container, + // all iterators are invalidated. Otherwise iterators are not affected and + // references are not invalidated. Overloads are listed below. + // + // T& operator[](const Key& key): + // + // Inserts an init_type object constructed in-place if the element with the + // given key does not exist. + // + // T& operator[](Key&& key): + // + // Inserts an init_type object constructed in-place provided that an element + // with the given key does not exist. + using Base::operator[]; + + // node_hash_map::bucket_count() + // + // Returns the number of "buckets" within the `node_hash_map`. + using Base::bucket_count; + + // node_hash_map::load_factor() + // + // Returns the current load factor of the `node_hash_map` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // node_hash_map::max_load_factor() + // + // Manages the maximum load factor of the `node_hash_map`. Overloads are + // listed below. + // + // float node_hash_map::max_load_factor() + // + // Returns the current maximum load factor of the `node_hash_map`. + // + // void node_hash_map::max_load_factor(float ml) + // + // Sets the maximum load factor of the `node_hash_map` to the passed value. 
+ // + // NOTE: This overload is provided only for API compatibility with the STL; + // `node_hash_map` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // node_hash_map::get_allocator() + // + // Returns the allocator function associated with this `node_hash_map`. + using Base::get_allocator; + + // node_hash_map::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `node_hash_map`. + using Base::hash_function; + + // node_hash_map::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(node_hash_map<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename node_hash_map::size_type erase_if( + node_hash_map& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + class NodeHashMapPolicy : public absl::container_internal::node_slot_policy&, NodeHashMapPolicy> + { + using value_type = std::pair; + + public: + using key_type = Key; + using mapped_type = Value; + using init_type = std::pair; + + template + static value_type* new_element(Allocator* alloc, Args&&... 
args) + { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + value_type* res = + absl::allocator_traits::allocate(pair_alloc, 1); + absl::allocator_traits::construct(pair_alloc, res, std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, value_type* pair) + { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + absl::allocator_traits::destroy(pair_alloc, pair); + absl::allocator_traits::deallocate(pair_alloc, pair, 1); + } + + template + static decltype(absl::container_internal::DecomposePair( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... args) + { + return absl::container_internal::DecomposePair(std::forward(f), std::forward(args)...); + } + + static size_t element_space_used(const value_type*) + { + return sizeof(value_type); + } + + static Value& value(value_type* elem) + { + return elem->second; + } + static const Value& value(const value_type* elem) + { + return elem->second; + } + }; + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer< + absl::node_hash_map> : std::true_type + { + }; + + } // namespace container_algorithm_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_NODE_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/node_hash_set.h b/CAPI/cpp/grpc/include/absl/container/node_hash_set.h new file mode 100644 index 00000000..95a9fcbb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/node_hash_set.h @@ -0,0 +1,517 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: node_hash_set.h +// ----------------------------------------------------------------------------- +// +// An `absl::node_hash_set` is an unordered associative container designed to +// be a more efficient replacement for `std::unordered_set`. Like +// `unordered_set`, search, insertion, and deletion of set elements can be done +// as an `O(1)` operation. However, `node_hash_set` (and other unordered +// associative containers known as the collection of Abseil "Swiss tables") +// contain other optimizations that result in both memory and computation +// advantages. +// +// In most cases, your default choice for a hash table should be a map of type +// `flat_hash_map` or a set of type `flat_hash_set`. However, if you need +// pointer stability, a `node_hash_set` should be your preferred choice. As +// well, if you are migrating your code from using `std::unordered_set`, a +// `node_hash_set` should be an easy migration. Consider migrating to +// `node_hash_set` and perhaps converting to a more efficient `flat_hash_set` +// upon further review. 
+ +#ifndef ABSL_CONTAINER_NODE_HASH_SET_H_ +#define ABSL_CONTAINER_NODE_HASH_SET_H_ + +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/node_slot_policy.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + struct NodeHashSetPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::node_hash_set + // ----------------------------------------------------------------------------- + // + // An `absl::node_hash_set` is an unordered associative container which + // has been optimized for both speed and memory footprint in most common use + // cases. Its interface is similar to that of `std::unordered_set` with the + // following notable differences: + // + // * Supports heterogeneous lookup, through `find()`, `operator[]()` and + // `insert()`, provided that the set is provided a compatible heterogeneous + // hashing function and equality operator. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash set. + // * Returns `void` from the `erase(iterator)` overload. + // + // By default, `node_hash_set` uses the `absl::Hash` hashing framework. + // All fundamental and Abseil types that support the `absl::Hash` framework have + // a compatible equality operator for comparing insertions into `node_hash_set`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. + // + // Using `absl::node_hash_set` at interface boundaries in dynamically loaded + // libraries (e.g. 
.dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. + // + // Example: + // + // // Create a node hash set of three strings + // absl::node_hash_set ducks = + // {"huey", "dewey", "louie"}; + // + // // Insert a new element into the node hash set + // ducks.insert("donald"); + // + // // Force a rehash of the node hash set + // ducks.rehash(0); + // + // // See if "dewey" is present + // if (ducks.contains("dewey")) { + // std::cout << "We found dewey!" << std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Alloc = std::allocator> + class node_hash_set : public absl::container_internal::raw_hash_set, Hash, Eq, Alloc> + { + using Base = typename node_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A node_hash_set supports the same overload set as `std::unordered_set` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::node_hash_set set1; + // + // * Initializer List constructor + // + // absl::node_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"}}; + // + // * Copy constructor + // + // absl::node_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::node_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::node_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::node_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::node_hash_set set7(v.begin(), v.end()); + node_hash_set() + { + } + using Base::Base; + + // node_hash_set::begin() + // + // Returns an iterator to the beginning of the `node_hash_set`. 
+ using Base::begin; + + // node_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `node_hash_set`. + using Base::cbegin; + + // node_hash_set::cend() + // + // Returns a const iterator to the end of the `node_hash_set`. + using Base::cend; + + // node_hash_set::end() + // + // Returns an iterator to the end of the `node_hash_set`. + using Base::end; + + // node_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `node_hash_set`. + // + // NOTE: this member function is particular to `absl::node_hash_set` and is + // not provided in the `std::unordered_set` API. + using Base::capacity; + + // node_hash_set::empty() + // + // Returns whether or not the `node_hash_set` is empty. + using Base::empty; + + // node_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `node_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `node_hash_set`. + using Base::max_size; + + // node_hash_set::size() + // + // Returns the number of elements currently within the `node_hash_set`. + using Base::size; + + // node_hash_set::clear() + // + // Removes all elements from the `node_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // node_hash_set::erase() + // + // Erases elements within the `node_hash_set`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `node_hash_set`, returning + // `void`. + // + // NOTE: this return behavior is different than that of STL containers in + // general and `std::unordered_set` in particular. 
+ // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. The special case of calling + // `erase(begin(), end())` resets the reserved growth such that if + // `reserve(N)` has previously been called and there has been no intervening + // call to `clear()`, then after calling `erase(begin(), end())`, it is safe + // to assume that inserting N elements will not cause a rehash. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // node_hash_set::insert() + // + // Inserts an element of the specified value into the `node_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `node_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `node_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. 
+ // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `node_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `node_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // node_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // node_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // node_hash_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. 
Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `node_hash_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. + using Base::extract; + + // node_hash_set::merge() + // + // Extracts elements from a given `source` node hash set into this + // `node_hash_set`. If the destination `node_hash_set` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // node_hash_set::swap(node_hash_set& other) + // + // Exchanges the contents of this `node_hash_set` with those of the `other` + // node hash set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `node_hash_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the node hash set's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the set's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // node_hash_set::rehash(count) + // + // Rehashes the `node_hash_set`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. 
+ // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_set`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // node_hash_set::reserve(count) + // + // Sets the number of slots in the `node_hash_set` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // node_hash_set::contains() + // + // Determines whether an element comparing equal to the given `key` exists + // within the `node_hash_set`, returning `true` if so or `false` otherwise. + using Base::contains; + + // node_hash_set::count(const Key& key) const + // + // Returns the number of elements comparing equal to the given `key` within + // the `node_hash_set`. note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `node_hash_set`. + using Base::count; + + // node_hash_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `node_hash_set`. + using Base::equal_range; + + // node_hash_set::find() + // + // Finds an element with the passed `key` within the `node_hash_set`. + using Base::find; + + // node_hash_set::bucket_count() + // + // Returns the number of "buckets" within the `node_hash_set`. Note that + // because a node hash set contains all elements within its internal storage, + // this value simply equals the current capacity of the `node_hash_set`. + using Base::bucket_count; + + // node_hash_set::load_factor() + // + // Returns the current load factor of the `node_hash_set` (the average number + // of slots occupied with a value within the hash set). + using Base::load_factor; + + // node_hash_set::max_load_factor() + // + // Manages the maximum load factor of the `node_hash_set`. Overloads are + // listed below. 
+ // + // float node_hash_set::max_load_factor() + // + // Returns the current maximum load factor of the `node_hash_set`. + // + // void node_hash_set::max_load_factor(float ml) + // + // Sets the maximum load factor of the `node_hash_set` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `node_hash_set` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // node_hash_set::get_allocator() + // + // Returns the allocator function associated with this `node_hash_set`. + using Base::get_allocator; + + // node_hash_set::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `node_hash_set`. + using Base::hash_function; + + // node_hash_set::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(node_hash_set<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename node_hash_set::size_type erase_if( + node_hash_set& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + struct NodeHashSetPolicy : absl::container_internal::node_slot_policy> + { + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static T* new_element(Allocator* alloc, Args&&... 
args) + { + using ValueAlloc = + typename absl::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + T* res = absl::allocator_traits::allocate(value_alloc, 1); + absl::allocator_traits::construct(value_alloc, res, std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, T* elem) + { + using ValueAlloc = + typename absl::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + absl::allocator_traits::destroy(value_alloc, elem); + absl::allocator_traits::deallocate(value_alloc, elem, 1); + } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... args) + { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)... + ); + } + + static size_t element_space_used(const T*) + { + return sizeof(T); + } + }; + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer> : std::true_type + { + }; + + } // namespace container_algorithm_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_NODE_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/crc32c.h b/CAPI/cpp/grpc/include/absl/crc/crc32c.h new file mode 100644 index 00000000..4542567c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/crc32c.h @@ -0,0 +1,201 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: crc32c.h +// ----------------------------------------------------------------------------- +// +// This header file defines the API for computing CRC32C values as checksums +// for arbitrary sequences of bytes provided as a string buffer. +// +// The API includes the basic functions for computing such CRC32C values and +// some utility functions for performing more efficient mathematical +// computations using an existing checksum. +#ifndef ABSL_CRC_CRC32C_H_ +#define ABSL_CRC_CRC32C_H_ + +#include +#include + +#include "absl/crc/internal/crc32c_inline.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + //----------------------------------------------------------------------------- + // crc32c_t + //----------------------------------------------------------------------------- + + // `crc32c_t` defines a strongly-typed integer for holding a CRC32C value. + // + // Some operators are intentionally omitted. Only equality operators are defined + // so that `crc32c_t` can be directly compared. Methods for putting `crc32c_t` + // directly into a set are omitted because this is bug-prone due to checksum + // collisions. Use an explicit conversion to the `uint32_t` space for operations + // that treat `crc32c_t` as an integer. 
+ class crc32c_t final + { + public: + crc32c_t() = default; + constexpr explicit crc32c_t(uint32_t crc) : + crc_(crc) + { + } + + crc32c_t(const crc32c_t&) = default; + crc32c_t& operator=(const crc32c_t&) = default; + + explicit operator uint32_t() const + { + return crc_; + } + + friend bool operator==(crc32c_t lhs, crc32c_t rhs) + { + return static_cast(lhs) == static_cast(rhs); + } + + friend bool operator!=(crc32c_t lhs, crc32c_t rhs) + { + return !(lhs == rhs); + } + + template + friend void AbslStringify(Sink& sink, crc32c_t crc) + { + absl::Format(&sink, "%08x", static_cast(crc)); + } + + private: + uint32_t crc_; + }; + + namespace crc_internal + { + // Non-inline code path for `absl::ExtendCrc32c()`. Do not call directly. + // Call `absl::ExtendCrc32c()` (defined below) instead. + crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc, absl::string_view buf_to_add); + } // namespace crc_internal + + // ----------------------------------------------------------------------------- + // CRC32C Computation Functions + // ----------------------------------------------------------------------------- + + // ComputeCrc32c() + // + // Returns the CRC32C value of the provided string. + crc32c_t ComputeCrc32c(absl::string_view buf); + + // ExtendCrc32c() + // + // Computes a CRC32C value from an `initial_crc` CRC32C value including the + // `buf_to_add` bytes of an additional buffer. Using this function is more + // efficient than computing a CRC32C value for the combined buffer from + // scratch. + // + // Note: `ExtendCrc32c` with an initial_crc of 0 is equivalent to + // `ComputeCrc32c`. + // + // This operation has a runtime cost of O(`buf_to_add.size()`) + inline crc32c_t ExtendCrc32c(crc32c_t initial_crc, absl::string_view buf_to_add) + { + // Approximately 75% of calls have size <= 64. 
+ if (buf_to_add.size() <= 64) + { + uint32_t crc = static_cast(initial_crc); + if (crc_internal::ExtendCrc32cInline(&crc, buf_to_add.data(), buf_to_add.size())) + { + return crc32c_t{crc}; + } + } + return crc_internal::ExtendCrc32cInternal(initial_crc, buf_to_add); + } + + // ExtendCrc32cByZeroes() + // + // Computes a CRC32C value for a buffer with an `initial_crc` CRC32C value, + // where `length` bytes with a value of 0 are appended to the buffer. Using this + // function is more efficient than computing a CRC32C value for the combined + // buffer from scratch. + // + // This operation has a runtime cost of O(log(`length`)) + crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length); + + // MemcpyCrc32c() + // + // Copies `src` to `dest` using `memcpy()` semantics, returning the CRC32C + // value of the copied buffer. + // + // Using `MemcpyCrc32c()` is potentially faster than performing the `memcpy()` + // and `ComputeCrc32c()` operations separately. + crc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count, crc32c_t initial_crc = crc32c_t{0}); + + // ----------------------------------------------------------------------------- + // CRC32C Arithmetic Functions + // ----------------------------------------------------------------------------- + + // The following functions perform arithmetic on CRC32C values, which are + // generally more efficient than recalculating any given result's CRC32C value. + + // ConcatCrc32c() + // + // Calculates the CRC32C value of two buffers with known CRC32C values + // concatenated together. + // + // Given a buffer with CRC32C value `crc1` and a buffer with + // CRC32C value `crc2` and length, `crc2_length`, returns the CRC32C value of + // the concatenation of these two buffers. + // + // This operation has a runtime cost of O(log(`crc2_length`)). 
+ crc32c_t ConcatCrc32c(crc32c_t crc1, crc32c_t crc2, size_t crc2_length); + + // RemoveCrc32cPrefix() + // + // Calculates the CRC32C value of an existing buffer with a series of bytes + // (the prefix) removed from the beginning of that buffer. + // + // Given the CRC32C value of an existing buffer, `full_string_crc`; The CRC32C + // value of a prefix of that buffer, `prefix_crc`; and the length of the buffer + // with the prefix removed, `remaining_string_length` , return the CRC32C + // value of the buffer with the prefix removed. + // + // This operation has a runtime cost of O(log(`remaining_string_length`)). + crc32c_t RemoveCrc32cPrefix(crc32c_t prefix_crc, crc32c_t full_string_crc, size_t remaining_string_length); + // RemoveCrc32cSuffix() + // + // Calculates the CRC32C value of an existing buffer with a series of bytes + // (the suffix) removed from the end of that buffer. + // + // Given a CRC32C value of an existing buffer `full_string_crc`, the CRC32C + // value of the suffix to remove `suffix_crc`, and the length of that suffix + // `suffix_len`, returns the CRC32C value of the buffer with suffix removed. + // + // This operation has a runtime cost of O(log(`suffix_len`)) + crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc, size_t suffix_length); + + // operator<< + // + // Streams the CRC32C value `crc` to the stream `os`. 
+ inline std::ostream& operator<<(std::ostream& os, crc32c_t crc) + { + return os << absl::StreamFormat("%08x", static_cast(crc)); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_CRC32C_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/cpu_detect.h b/CAPI/cpp/grpc/include/absl/crc/internal/cpu_detect.h new file mode 100644 index 00000000..260121fe --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/cpu_detect.h @@ -0,0 +1,60 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CPU_DETECT_H_ +#define ABSL_CRC_INTERNAL_CPU_DETECT_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + + // Enumeration of architectures that we have special-case tuning parameters for. + // This set may change over time. + enum class CpuType + { + kUnknown, + kIntelHaswell, + kAmdRome, + kAmdNaples, + kAmdMilan, + kIntelCascadelakeXeon, + kIntelSkylakeXeon, + kIntelBroadwell, + kIntelSkylake, + kIntelIvybridge, + kIntelSandybridge, + kIntelWestmere, + kArmNeoverseN1, + }; + + // Returns the type of host CPU this code is running on. Returns kUnknown if + // the host CPU is of unknown type, or if detection otherwise fails. + CpuType GetCpuType(); + + // Returns whether the host CPU supports the CPU features needed for our + // accelerated implementations. 
The CpuTypes enumerated above apart from + // kUnknown support the required features. On unknown CPUs, we can use + // this to see if it's safe to use hardware acceleration, though without any + // tuning. + bool SupportsArmCRC32PMULL(); + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CPU_DETECT_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/crc.h b/CAPI/cpp/grpc/include/absl/crc/internal/crc.h new file mode 100644 index 00000000..0561221d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/crc.h @@ -0,0 +1,85 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC_H_ +#define ABSL_CRC_INTERNAL_CRC_H_ + +#include + +#include "absl/base/config.h" + +// This class implements CRCs (aka Rabin Fingerprints). +// Treats the input as a polynomial with coefficients in Z(2), +// and finds the remainder when divided by an primitive polynomial +// of the appropriate length. + +// A polynomial is represented by the bit pattern formed by its coefficients, +// but with the highest order bit not stored. +// The highest degree coefficient is stored in the lowest numbered bit +// in the lowest addressed byte. Thus, in what follows, the highest degree +// coefficient that is stored is in the low order bit of "lo" or "*lo". + +// Hardware acceleration is used when available. 
+ +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + + class CRC + { + public: + virtual ~CRC(); + + // If "*crc" is the CRC of bytestring A, place the CRC of + // the bytestring formed from the concatenation of A and the "length" + // bytes at "bytes" into "*crc". + virtual void Extend(uint32_t* crc, const void* bytes, size_t length) const = 0; + + // Equivalent to Extend(crc, bytes, length) where "bytes" + // points to an array of "length" zero bytes. + virtual void ExtendByZeroes(uint32_t* crc, size_t length) const = 0; + + // Inverse operation of ExtendByZeroes. If `crc` is the CRC value of a string + // ending in `length` zero bytes, this returns a CRC value of that string + // with those zero bytes removed. + virtual void UnextendByZeroes(uint32_t* crc, size_t length) const = 0; + + // Apply a non-linear transformation to "*crc" so that + // it is safe to CRC the result with the same polynomial without + // any reduction of error-detection ability in the outer CRC. + // Unscramble() performs the inverse transformation. + // It is strongly recommended that CRCs be scrambled before storage or + // transmission, and unscrambled at the other end before further manipulation. + virtual void Scramble(uint32_t* crc) const = 0; + virtual void Unscramble(uint32_t* crc) const = 0; + + // Crc32c() returns the singleton implementation of CRC for the CRC32C + // polynomial. Returns a handle that MUST NOT be destroyed with delete. + static CRC* Crc32c(); + + protected: + CRC(); // Clients may not call constructor; use Crc32c() instead. 
+ + private: + CRC(const CRC&) = delete; + CRC& operator=(const CRC&) = delete; + }; + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/crc32_x86_arm_combined_simd.h b/CAPI/cpp/grpc/include/absl/crc/internal/crc32_x86_arm_combined_simd.h new file mode 100644 index 00000000..96cba8d1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/crc32_x86_arm_combined_simd.h @@ -0,0 +1,325 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_ +#define ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_ + +#include + +#include "absl/base/config.h" + +// ------------------------------------------------------------------------- +// Many x86 and ARM machines have CRC acceleration hardware. +// We can do a faster version of Extend() on such machines. +// We define a translation layer for both x86 and ARM for the ease of use and +// most performance gains. + +// This implementation requires 64-bit CRC instructions (part of SSE 4.2) and +// PCLMULQDQ instructions. 32-bit builds with SSE 4.2 do exist, so the +// __x86_64__ condition is necessary. 
+#if defined(__x86_64__) && defined(__SSE4_2__) && defined(__PCLMUL__) + +#include +#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD + +#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__) + +// MSVC AVX (/arch:AVX) implies SSE 4.2 and PCLMULQDQ. +#include +#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD + +#elif defined(__aarch64__) && defined(__LITTLE_ENDIAN__) && \ + defined(__ARM_FEATURE_CRC32) && defined(ABSL_INTERNAL_HAVE_ARM_NEON) && \ + defined(__ARM_FEATURE_CRYPTO) + +#include +#include +#define ABSL_CRC_INTERNAL_HAVE_ARM_SIMD + +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + +#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \ + defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) + +#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) + using V128 = uint64x2_t; +#else + using V128 = __m128i; +#endif + + // Starting with the initial value in |crc|, accumulates a CRC32 value for + // unsigned integers of different sizes. + uint32_t CRC32_u8(uint32_t crc, uint8_t v); + + uint32_t CRC32_u16(uint32_t crc, uint16_t v); + + uint32_t CRC32_u32(uint32_t crc, uint32_t v); + + uint32_t CRC32_u64(uint32_t crc, uint64_t v); + + // Loads 128 bits of integer data. |src| must be 16-byte aligned. + V128 V128_Load(const V128* src); + + // Load 128 bits of integer data. |src| does not need to be aligned. + V128 V128_LoadU(const V128* src); + + // Polynomially multiplies the high 64 bits of |l| and |r|. + V128 V128_PMulHi(const V128 l, const V128 r); + + // Polynomially multiplies the low 64 bits of |l| and |r|. + V128 V128_PMulLow(const V128 l, const V128 r); + + // Polynomially multiplies the low 64 bits of |r| and high 64 bits of |l|. + V128 V128_PMul01(const V128 l, const V128 r); + + // Polynomially multiplies the low 64 bits of |l| and high 64 bits of |r|. + V128 V128_PMul10(const V128 l, const V128 r); + + // Produces a XOR operation of |l| and |r|. + V128 V128_Xor(const V128 l, const V128 r); + + // Produces an AND operation of |l| and |r|. 
+ V128 V128_And(const V128 l, const V128 r); + + // Sets two 64 bit integers to one 128 bit vector. The order is reverse. + // dst[63:0] := |r| + // dst[127:64] := |l| + V128 V128_From2x64(const uint64_t l, const uint64_t r); + + // Shift |l| right by |imm| bytes while shifting in zeros. + template + V128 V128_ShiftRight(const V128 l); + + // Extracts a 32-bit integer from |l|, selected with |imm|. + template + int V128_Extract32(const V128 l); + + // Extracts the low 64 bits from V128. + int64_t V128_Low64(const V128 l); + + // Left-shifts packed 64-bit integers in l by r. + V128 V128_ShiftLeft64(const V128 l, const V128 r); + +#endif + +#if defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) + + inline uint32_t CRC32_u8(uint32_t crc, uint8_t v) + { + return _mm_crc32_u8(crc, v); + } + + inline uint32_t CRC32_u16(uint32_t crc, uint16_t v) + { + return _mm_crc32_u16(crc, v); + } + + inline uint32_t CRC32_u32(uint32_t crc, uint32_t v) + { + return _mm_crc32_u32(crc, v); + } + + inline uint32_t CRC32_u64(uint32_t crc, uint64_t v) + { + return static_cast(_mm_crc32_u64(crc, v)); + } + + inline V128 V128_Load(const V128* src) + { + return _mm_load_si128(src); + } + + inline V128 V128_LoadU(const V128* src) + { + return _mm_loadu_si128(src); + } + + inline V128 V128_PMulHi(const V128 l, const V128 r) + { + return _mm_clmulepi64_si128(l, r, 0x11); + } + + inline V128 V128_PMulLow(const V128 l, const V128 r) + { + return _mm_clmulepi64_si128(l, r, 0x00); + } + + inline V128 V128_PMul01(const V128 l, const V128 r) + { + return _mm_clmulepi64_si128(l, r, 0x01); + } + + inline V128 V128_PMul10(const V128 l, const V128 r) + { + return _mm_clmulepi64_si128(l, r, 0x10); + } + + inline V128 V128_Xor(const V128 l, const V128 r) + { + return _mm_xor_si128(l, r); + } + + inline V128 V128_And(const V128 l, const V128 r) + { + return _mm_and_si128(l, r); + } + + inline V128 V128_From2x64(const uint64_t l, const uint64_t r) + { + return _mm_set_epi64x(static_cast(l), static_cast(r)); + } + + 
template + inline V128 V128_ShiftRight(const V128 l) + { + return _mm_srli_si128(l, imm); + } + + template + inline int V128_Extract32(const V128 l) + { + return _mm_extract_epi32(l, imm); + } + + inline int64_t V128_Low64(const V128 l) + { + return _mm_cvtsi128_si64(l); + } + + inline V128 V128_ShiftLeft64(const V128 l, const V128 r) + { + return _mm_sll_epi64(l, r); + } + +#elif defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) + + inline uint32_t CRC32_u8(uint32_t crc, uint8_t v) + { + return __crc32cb(crc, v); + } + + inline uint32_t CRC32_u16(uint32_t crc, uint16_t v) + { + return __crc32ch(crc, v); + } + + inline uint32_t CRC32_u32(uint32_t crc, uint32_t v) + { + return __crc32cw(crc, v); + } + + inline uint32_t CRC32_u64(uint32_t crc, uint64_t v) + { + return __crc32cd(crc, v); + } + + inline V128 V128_Load(const V128* src) + { + return vld1q_u64(reinterpret_cast(src)); + } + + inline V128 V128_LoadU(const V128* src) + { + return vld1q_u64(reinterpret_cast(src)); + } + + // Using inline assembly as clang does not generate the pmull2 instruction and + // performance drops by 15-20%. + // TODO(b/193678732): Investigate why the compiler decides not to generate + // such instructions and why it becomes so much worse. 
+ inline V128 V128_PMulHi(const V128 l, const V128 r) + { + uint64x2_t res; + __asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" + : "=w"(res) + : "w"(l), "w"(r)); + return res; + } + + inline V128 V128_PMulLow(const V128 l, const V128 r) + { + return reinterpret_cast(vmull_p64( + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(l))), + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(r))) + )); + } + + inline V128 V128_PMul01(const V128 l, const V128 r) + { + return reinterpret_cast(vmull_p64( + reinterpret_cast(vget_high_p64(vreinterpretq_p64_u64(l))), + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(r))) + )); + } + + inline V128 V128_PMul10(const V128 l, const V128 r) + { + return reinterpret_cast(vmull_p64( + reinterpret_cast(vget_low_p64(vreinterpretq_p64_u64(l))), + reinterpret_cast(vget_high_p64(vreinterpretq_p64_u64(r))) + )); + } + + inline V128 V128_Xor(const V128 l, const V128 r) + { + return veorq_u64(l, r); + } + + inline V128 V128_And(const V128 l, const V128 r) + { + return vandq_u64(l, r); + } + + inline V128 V128_From2x64(const uint64_t l, const uint64_t r) + { + return vcombine_u64(vcreate_u64(r), vcreate_u64(l)); + } + + template + inline V128 V128_ShiftRight(const V128 l) + { + return vreinterpretq_u64_s8( + vextq_s8(vreinterpretq_s8_u64(l), vdupq_n_s8(0), imm) + ); + } + + template + inline int V128_Extract32(const V128 l) + { + return vgetq_lane_s32(vreinterpretq_s32_u64(l), imm); + } + + inline int64_t V128_Low64(const V128 l) + { + return vgetq_lane_s64(vreinterpretq_s64_u64(l), 0); + } + + inline V128 V128_ShiftLeft64(const V128 l, const V128 r) + { + return vshlq_u64(l, vreinterpretq_s64_u64(r)); + } + +#endif + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC32_X86_ARM_COMBINED_SIMD_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/crc32c.h b/CAPI/cpp/grpc/include/absl/crc/internal/crc32c.h new file mode 100644 index 00000000..644a092a --- /dev/null +++ 
b/CAPI/cpp/grpc/include/absl/crc/internal/crc32c.h @@ -0,0 +1,41 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC32C_H_ +#define ABSL_CRC_INTERNAL_CRC32C_H_ + +#include "absl/base/config.h" +#include "absl/crc/crc32c.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + + // Modifies a CRC32 value by removing `length` bytes with a value of 0 from + // the end of the string. + // + // This is the inverse operation of ExtendCrc32cByZeroes(). + // + // This operation has a runtime cost of O(log(`length`)) + // + // Internal implementation detail, exposed for testing only. + crc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length); + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC32C_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/crc32c_inline.h b/CAPI/cpp/grpc/include/absl/crc/internal/crc32c_inline.h new file mode 100644 index 00000000..57d9c35c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/crc32c_inline.h @@ -0,0 +1,79 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC32C_INLINE_H_ +#define ABSL_CRC_INTERNAL_CRC32C_INLINE_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/crc/internal/crc32_x86_arm_combined_simd.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + + // CRC32C implementation optimized for small inputs. + // Either computes crc and return true, or if there is + // no hardware support does nothing and returns false. + inline bool ExtendCrc32cInline(uint32_t* crc, const char* p, size_t n) + { +#if defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || \ + defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) + constexpr uint32_t kCrc32Xor = 0xffffffffU; + *crc ^= kCrc32Xor; + if (n & 1) + { + *crc = CRC32_u8(*crc, static_cast(*p)); + n--; + p++; + } + if (n & 2) + { + *crc = CRC32_u16(*crc, absl::little_endian::Load16(p)); + n -= 2; + p += 2; + } + if (n & 4) + { + *crc = CRC32_u32(*crc, absl::little_endian::Load32(p)); + n -= 4; + p += 4; + } + while (n) + { + *crc = CRC32_u64(*crc, absl::little_endian::Load64(p)); + n -= 8; + p += 8; + } + *crc ^= kCrc32Xor; + return true; +#else + // No hardware support, signal the need to fallback. 
+ static_cast(crc); + static_cast(p); + static_cast(n); + return false; +#endif // defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD) || + // defined(ABSL_CRC_INTERNAL_HAVE_X86_SIMD) + } + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC32C_INLINE_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/crc_cord_state.h b/CAPI/cpp/grpc/include/absl/crc/internal/crc_cord_state.h new file mode 100644 index 00000000..9c98411a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/crc_cord_state.h @@ -0,0 +1,182 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_ +#define ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/crc/crc32c.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + + // CrcCordState is a copy-on-write class that holds the chunked CRC32C data + // that allows CrcCord to perform efficient substring operations. CrcCordState + // is used as a member variable in CrcCord. When a CrcCord is converted to a + // Cord, the CrcCordState is shallow-copied into the root node of the Cord. If + // the converted Cord is modified outside of CrcCord, the CrcCordState is + // discarded from the Cord. 
If the Cord is converted back to a CrcCord, and the + // Cord is still carrying the CrcCordState in its root node, the CrcCord can + // re-use the CrcCordState, making the construction of the CrcCord cheap. + // + // CrcCordState does not try to encapsulate the CRC32C state (CrcCord requires + // knowledge of how CrcCordState represents the CRC32C state). It does + // encapsulate the copy-on-write nature of the state. + class CrcCordState + { + public: + // Constructors. + CrcCordState(); + CrcCordState(const CrcCordState&); + CrcCordState(CrcCordState&&); + + // Destructor. Atomically unreferences the data. + ~CrcCordState(); + + // Copy and move operators. + CrcCordState& operator=(const CrcCordState&); + CrcCordState& operator=(CrcCordState&&); + + // A (length, crc) pair. + struct PrefixCrc + { + PrefixCrc() = default; + PrefixCrc(size_t length_arg, absl::crc32c_t crc_arg) : + length(length_arg), + crc(crc_arg) + { + } + + size_t length = 0; + + // TODO(absl-team): Memory stomping often zeros out memory. If this struct + // gets overwritten, we could end up with {0, 0}, which is the correct CRC + // for a string of length 0. Consider storing a scrambled value and + // unscrambling it before verifying it. + absl::crc32c_t crc = absl::crc32c_t{0}; + }; + + // The representation of the chunked CRC32C data. + struct Rep + { + // `removed_prefix` is the crc and length of any prefix that has been + // removed from the Cord (for example, by calling + // `CrcCord::RemovePrefix()`). To get the checksum of any prefix of the + // cord, this value must be subtracted from `prefix_crc`. See `Checksum()` + // for an example. + // + // CrcCordState is said to be "normalized" if removed_prefix.length == 0. + PrefixCrc removed_prefix; + + // A deque of (length, crc) pairs, representing length and crc of a prefix + // of the Cord, before removed_prefix has been subtracted. The lengths of + // the prefixes are stored in increasing order. 
If the Cord is not empty, + // the last value in deque is the contains the CRC32C of the entire Cord + // when removed_prefix is subtracted from it. + std::deque prefix_crc; + }; + + // Returns a reference to the representation of the chunked CRC32C data. + const Rep& rep() const + { + return refcounted_rep_->rep; + } + + // Returns a mutable reference to the representation of the chunked CRC32C + // data. Calling this function will copy the data if another instance also + // holds a reference to the data, so it is important to call rep() instead if + // the data may not be mutated. + Rep* mutable_rep() + { + if (refcounted_rep_->count.load(std::memory_order_acquire) != 1) + { + RefcountedRep* copy = new RefcountedRep; + copy->rep = refcounted_rep_->rep; + Unref(refcounted_rep_); + refcounted_rep_ = copy; + } + return &refcounted_rep_->rep; + } + + // Returns the CRC32C of the entire Cord. + absl::crc32c_t Checksum() const; + + // Returns true if the chunked CRC32C cached is normalized. + bool IsNormalized() const + { + return rep().removed_prefix.length == 0; + } + + // Normalizes the chunked CRC32C checksum cache by subtracting any removed + // prefix from the chunks. + void Normalize(); + + // Returns the number of cached chunks. + size_t NumChunks() const + { + return rep().prefix_crc.size(); + } + + // Helper that returns the (length, crc) of the `n`-th cached chunked. + PrefixCrc NormalizedPrefixCrcAtNthChunk(size_t n) const; + + // Poisons all chunks to so that Checksum() will likely be incorrect with high + // probability. + void Poison(); + + private: + struct RefcountedRep + { + std::atomic count{1}; + Rep rep; + }; + + // Adds a reference to the shared global empty `RefcountedRep`, and returns a + // pointer to the `RefcountedRep`. This is an optimization to avoid unneeded + // allocations when the allocation is unlikely to ever be used. The returned + // pointer can be `Unref()`ed when it is no longer needed. 
Since the returned + // instance will always have a reference counter greater than 1, attempts to + // modify it (by calling `mutable_rep()`) will create a new unshared copy. + static RefcountedRep* RefSharedEmptyRep(); + + static void Ref(RefcountedRep* r) + { + assert(r != nullptr); + r->count.fetch_add(1, std::memory_order_relaxed); + } + + static void Unref(RefcountedRep* r) + { + assert(r != nullptr); + if (r->count.fetch_sub(1, std::memory_order_acq_rel) == 1) + { + delete r; + } + } + + RefcountedRep* refcounted_rep_; + }; + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/crc_internal.h b/CAPI/cpp/grpc/include/absl/crc/internal/crc_internal.h new file mode 100644 index 00000000..ee544739 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/crc_internal.h @@ -0,0 +1,181 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CRC_INTERNAL_CRC_INTERNAL_H_ +#define ABSL_CRC_INTERNAL_CRC_INTERNAL_H_ + +#include +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/crc/internal/crc.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace crc_internal + { + + // Prefetch constants used in some Extend() implementations + constexpr int kPrefetchHorizon = ABSL_CACHELINE_SIZE * 4; // Prefetch this far + // Shorter prefetch distance for smaller buffers + constexpr int kPrefetchHorizonMedium = ABSL_CACHELINE_SIZE * 1; + static_assert(kPrefetchHorizon >= 64, "CRCPrefetchHorizon less than loop len"); + + // We require the Scramble() function: + // - to be reversible (Unscramble() must exist) + // - to be non-linear in the polynomial's Galois field (so the CRC of a + // scrambled CRC is not linearly affected by the scrambled CRC, even if + // using the same polynomial) + // - not to be its own inverse. Preferably, if X=Scramble^N(X) and N!=0, then + // N is large. + // - to be fast. + // - not to change once defined. + // We introduce non-linearity in two ways: + // Addition of a constant. + // - The carries introduce non-linearity; we use bits of an irrational + // (phi) to make it unlikely that we introduce no carries. + // Rotate by a constant number of bits. + // - We use floor(degree/2)+1, which does not divide the degree, and + // splits the bits nearly evenly, which makes it less likely the + // halves will be the same or one will be all zeroes. + // We do both things to improve the chances of non-linearity in the face of + // bit patterns with low numbers of bits set, while still being fast. + // Below is the constant that we add. The bits are the first 128 bits of the + // fractional part of phi, with a 1 ored into the bottom bit to maximize the + // cycle length of repeated adds. 
+ constexpr uint64_t kScrambleHi = (static_cast(0x4f1bbcdcU) << 32) | + static_cast(0xbfa53e0aU); + constexpr uint64_t kScrambleLo = (static_cast(0xf9ce6030U) << 32) | + static_cast(0x2e76e41bU); + + class CRCImpl : public CRC + { // Implementation of the abstract class CRC + public: + using Uint32By256 = uint32_t[256]; + + CRCImpl() = default; + ~CRCImpl() override = default; + + // The internal version of CRC::New(). + static CRCImpl* NewInternal(); + + // Fill in a table for updating a CRC by one word of 'word_size' bytes + // [last_lo, last_hi] contains the answer if the last bit in the word + // is set. + static void FillWordTable(uint32_t poly, uint32_t last, int word_size, Uint32By256* t); + + // Build the table for extending by zeroes, returning the number of entries. + // For a in {1, 2, ..., ZEROES_BASE-1}, b in {0, 1, 2, 3, ...}, + // entry j=a-1+(ZEROES_BASE-1)*b + // contains a polynomial Pi such that multiplying + // a CRC by Pi mod P, where P is the CRC polynomial, is equivalent to + // appending a*2**(ZEROES_BASE_LG*b) zero bytes to the original string. + static int FillZeroesTable(uint32_t poly, Uint32By256* t); + + virtual void InitTables() = 0; + + private: + CRCImpl(const CRCImpl&) = delete; + CRCImpl& operator=(const CRCImpl&) = delete; + }; + + // This is the 32-bit implementation. It handles all sizes from 8 to 32. + class CRC32 : public CRCImpl + { + public: + CRC32() = default; + ~CRC32() override = default; + + void Extend(uint32_t* crc, const void* bytes, size_t length) const override; + void ExtendByZeroes(uint32_t* crc, size_t length) const override; + void Scramble(uint32_t* crc) const override; + void Unscramble(uint32_t* crc) const override; + void UnextendByZeroes(uint32_t* crc, size_t length) const override; + + void InitTables() override; + + private: + // Common implementation guts for ExtendByZeroes and UnextendByZeroes(). 
+ // + // zeroes_table is a table as returned by FillZeroesTable(), containing + // polynomials representing CRCs of strings-of-zeros of various lengths, + // and which can be combined by polynomial multiplication. poly_table is + // a table of CRC byte extension values. These tables are determined by + // the generator polynomial. + // + // These will be set to reverse_zeroes_ and reverse_table0_ for Unextend, and + // CRC32::zeroes_ and CRC32::table0_ for Extend. + static void ExtendByZeroesImpl(uint32_t* crc, size_t length, const uint32_t zeroes_table[256], const uint32_t poly_table[256]); + + uint32_t table0_[256]; // table of byte extensions + uint32_t zeroes_[256]; // table of zero extensions + + // table of 4-byte extensions shifted by 12 bytes of zeroes + uint32_t table_[4][256]; + + // Reverse lookup tables, using the alternate polynomial used by + // UnextendByZeroes(). + uint32_t reverse_table0_[256]; // table of reverse byte extensions + uint32_t reverse_zeroes_[256]; // table of reverse zero extensions + + CRC32(const CRC32&) = delete; + CRC32& operator=(const CRC32&) = delete; + }; + + // Helpers + + // Return a bit mask containing len 1-bits. + // Requires 0 < len <= sizeof(T) + template + T MaskOfLength(int len) + { + // shift 2 by len-1 rather than 1 by len because shifts of wordsize + // are undefined. + return (T(2) << (len - 1)) - 1; + } + + // Rotate low-order "width" bits of "in" right by "r" bits, + // setting other bits in word to arbitrary values. + template + T RotateRight(T in, int width, int r) + { + return (in << (width - r)) | ((in >> r) & MaskOfLength(width - r)); + } + + // RoundUp(p) returns the lowest address >= p aligned to an N-byte + // boundary. Requires that N is a power of 2. 
+ template + const uint8_t* RoundUp(const uint8_t* p) + { + static_assert((alignment & (alignment - 1)) == 0, "alignment is not 2^n"); + constexpr uintptr_t mask = alignment - 1; + const uintptr_t as_uintptr = reinterpret_cast(p); + return reinterpret_cast((as_uintptr + mask) & ~mask); + } + + // Return a newly created CRC32AcceleratedX86ARMCombined if we can use Intel's + // or ARM's CRC acceleration for a given polynomial. Return nullptr otherwise. + CRCImpl* TryNewCRC32AcceleratedX86ARMCombined(); + + // Return all possible hardware accelerated implementations. For testing only. + std::vector> NewCRC32AcceleratedX86ARMCombinedAll(); + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/crc_memcpy.h b/CAPI/cpp/grpc/include/absl/crc/internal/crc_memcpy.h new file mode 100644 index 00000000..4f16dbac --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/crc_memcpy.h @@ -0,0 +1,119 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_CRC_MEMCPY_H_ +#define ABSL_CRC_INTERNAL_CRC_MEMCPY_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/crc/crc32c.h" + +// Defined if the class AcceleratedCrcMemcpyEngine exists. 
+#if defined(__x86_64__) && defined(__SSE4_2__) +#define ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE 1 +#elif defined(_MSC_VER) && defined(__AVX__) +#define ABSL_INTERNAL_HAVE_X86_64_ACCELERATED_CRC_MEMCPY_ENGINE 1 +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + + class CrcMemcpyEngine + { + public: + virtual ~CrcMemcpyEngine() = default; + + virtual crc32c_t Compute(void* __restrict dst, const void* __restrict src, std::size_t length, crc32c_t initial_crc) const = 0; + + protected: + CrcMemcpyEngine() = default; + }; + + class CrcMemcpy + { + public: + static crc32c_t CrcAndCopy(void* __restrict dst, const void* __restrict src, std::size_t length, crc32c_t initial_crc = crc32c_t{0}, bool non_temporal = false) + { + static const ArchSpecificEngines engines = GetArchSpecificEngines(); + auto* engine = non_temporal ? engines.non_temporal : engines.temporal; + return engine->Compute(dst, src, length, initial_crc); + } + + // For testing only: get an architecture-specific engine for tests. + static std::unique_ptr GetTestEngine(int vector, int integer); + + private: + struct ArchSpecificEngines + { + CrcMemcpyEngine* temporal; + CrcMemcpyEngine* non_temporal; + }; + + static ArchSpecificEngines GetArchSpecificEngines(); + }; + + // Fallback CRC-memcpy engine. + class FallbackCrcMemcpyEngine : public CrcMemcpyEngine + { + public: + FallbackCrcMemcpyEngine() = default; + FallbackCrcMemcpyEngine(const FallbackCrcMemcpyEngine&) = delete; + FallbackCrcMemcpyEngine operator=(const FallbackCrcMemcpyEngine&) = delete; + + crc32c_t Compute(void* __restrict dst, const void* __restrict src, std::size_t length, crc32c_t initial_crc) const override; + }; + + // CRC Non-Temporal-Memcpy engine. 
+ class CrcNonTemporalMemcpyEngine : public CrcMemcpyEngine + { + public: + CrcNonTemporalMemcpyEngine() = default; + CrcNonTemporalMemcpyEngine(const CrcNonTemporalMemcpyEngine&) = delete; + CrcNonTemporalMemcpyEngine operator=(const CrcNonTemporalMemcpyEngine&) = + delete; + + crc32c_t Compute(void* __restrict dst, const void* __restrict src, std::size_t length, crc32c_t initial_crc) const override; + }; + + // CRC Non-Temporal-Memcpy AVX engine. + class CrcNonTemporalMemcpyAVXEngine : public CrcMemcpyEngine + { + public: + CrcNonTemporalMemcpyAVXEngine() = default; + CrcNonTemporalMemcpyAVXEngine(const CrcNonTemporalMemcpyAVXEngine&) = delete; + CrcNonTemporalMemcpyAVXEngine operator=( + const CrcNonTemporalMemcpyAVXEngine& + ) = delete; + + crc32c_t Compute(void* __restrict dst, const void* __restrict src, std::size_t length, crc32c_t initial_crc) const override; + }; + + // Copy source to destination and return the CRC32C of the data copied. If an + // accelerated version is available, use the accelerated version, otherwise use + // the generic fallback version. + inline crc32c_t Crc32CAndCopy(void* __restrict dst, const void* __restrict src, std::size_t length, crc32c_t initial_crc = crc32c_t{0}, bool non_temporal = false) + { + return CrcMemcpy::CrcAndCopy(dst, src, length, initial_crc, non_temporal); + } + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_CRC_MEMCPY_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/non_temporal_arm_intrinsics.h b/CAPI/cpp/grpc/include/absl/crc/internal/non_temporal_arm_intrinsics.h new file mode 100644 index 00000000..6d3dba2a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/non_temporal_arm_intrinsics.h @@ -0,0 +1,78 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_ +#define ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_ + +#include "absl/base/config.h" + +#ifdef __aarch64__ +#include + +typedef int64x2_t __m128i; /* 128-bit vector containing integers */ +#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x) +#define vreinterpretq_s64_m128i(x) (x) + +// Guarantees that every preceding store is globally visible before any +// subsequent store. +// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx +static inline __attribute__((always_inline)) void _mm_sfence(void) +{ + __sync_synchronize(); +} + +// Load 128-bits of integer data from unaligned memory into dst. This intrinsic +// may perform better than _mm_loadu_si128 when the data crosses a cache line +// boundary. +// +// dst[127:0] := MEM[mem_addr+127:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128 +#define _mm_lddqu_si128 _mm_loadu_si128 + +// Loads 128-bit value. : +// https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx +static inline __attribute__((always_inline)) __m128i _mm_loadu_si128( + const __m128i* p +) +{ + return vreinterpretq_m128i_s32(vld1q_s32((const int32_t*)p)); +} + +// Stores the data in a to the address p without polluting the caches. If the +// cache line containing address p is already in the cache, the cache will be +// updated. 
+// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx +static inline __attribute__((always_inline)) void _mm_stream_si128(__m128i* p, __m128i a) +{ +#if ABSL_HAVE_BUILTIN(__builtin_nontemporal_store) + __builtin_nontemporal_store(a, p); +#else + vst1q_s64((int64_t*)p, vreinterpretq_s64_m128i(a)); +#endif +} + +// Sets the 16 signed 8-bit integer values. +// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx +static inline __attribute__((always_inline)) __m128i _mm_set_epi8( + signed char b15, signed char b14, signed char b13, signed char b12, signed char b11, signed char b10, signed char b9, signed char b8, signed char b7, signed char b6, signed char b5, signed char b4, signed char b3, signed char b2, signed char b1, signed char b0 +) +{ + int8_t __attribute__((aligned(16))) + data[16] = {(int8_t)b0, (int8_t)b1, (int8_t)b2, (int8_t)b3, (int8_t)b4, (int8_t)b5, (int8_t)b6, (int8_t)b7, (int8_t)b8, (int8_t)b9, (int8_t)b10, (int8_t)b11, (int8_t)b12, (int8_t)b13, (int8_t)b14, (int8_t)b15}; + return (__m128i)vld1q_s8(data); +} +#endif // __aarch64__ + +#endif // ABSL_CRC_INTERNAL_NON_TEMPORAL_ARM_INTRINSICS_H_ diff --git a/CAPI/cpp/grpc/include/absl/crc/internal/non_temporal_memcpy.h b/CAPI/cpp/grpc/include/absl/crc/internal/non_temporal_memcpy.h new file mode 100644 index 00000000..9dcabb1f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/crc/internal/non_temporal_memcpy.h @@ -0,0 +1,189 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_ +#define ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_ + +#ifdef _MSC_VER +#include +#endif + +#ifdef __SSE__ +#include +#endif + +#ifdef __SSE2__ +#include +#endif + +#ifdef __SSE3__ +#include +#endif + +#ifdef __AVX__ +#include +#endif + +#ifdef __aarch64__ +#include "absl/crc/internal/non_temporal_arm_intrinsics.h" +#endif + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace crc_internal + { + + // This non-temporal memcpy does regular load and non-temporal store memory + // copy. It is compatible to both 16-byte aligned and unaligned addresses. If + // data at the destination is not immediately accessed, using non-temporal + // memcpy can save 1 DRAM load of the destination cacheline. + constexpr size_t kCacheLineSize = ABSL_CACHELINE_SIZE; + + // If the objects overlap, the behavior is undefined. + inline void* non_temporal_store_memcpy(void* __restrict dst, const void* __restrict src, size_t len) + { +#if defined(__SSE3__) || defined(__aarch64__) || \ + (defined(_MSC_VER) && defined(__AVX__)) + // This implementation requires SSE3. + // MSVC cannot target SSE3 directly, but when MSVC targets AVX, + // SSE3 support is implied. + uint8_t* d = reinterpret_cast(dst); + const uint8_t* s = reinterpret_cast(src); + + // memcpy() the misaligned header. At the end of this if block, is + // aligned to a 64-byte cacheline boundary or == 0. 
+ if (reinterpret_cast(d) & (kCacheLineSize - 1)) + { + uintptr_t bytes_before_alignment_boundary = + kCacheLineSize - + (reinterpret_cast(d) & (kCacheLineSize - 1)); + size_t header_len = (std::min)(bytes_before_alignment_boundary, len); + assert(bytes_before_alignment_boundary < kCacheLineSize); + memcpy(d, s, header_len); + d += header_len; + s += header_len; + len -= header_len; + } + + if (len >= kCacheLineSize) + { + _mm_sfence(); + __m128i* dst_cacheline = reinterpret_cast<__m128i*>(d); + const __m128i* src_cacheline = reinterpret_cast(s); + constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m128i); + size_t loops = len / kCacheLineSize; + + while (len >= kCacheLineSize) + { + __m128i temp1, temp2, temp3, temp4; + temp1 = _mm_lddqu_si128(src_cacheline + 0); + temp2 = _mm_lddqu_si128(src_cacheline + 1); + temp3 = _mm_lddqu_si128(src_cacheline + 2); + temp4 = _mm_lddqu_si128(src_cacheline + 3); + _mm_stream_si128(dst_cacheline + 0, temp1); + _mm_stream_si128(dst_cacheline + 1, temp2); + _mm_stream_si128(dst_cacheline + 2, temp3); + _mm_stream_si128(dst_cacheline + 3, temp4); + src_cacheline += kOpsPerCacheLine; + dst_cacheline += kOpsPerCacheLine; + len -= kCacheLineSize; + } + d += loops * kCacheLineSize; + s += loops * kCacheLineSize; + _mm_sfence(); + } + + // memcpy the tail. + if (len) + { + memcpy(d, s, len); + } + return dst; +#else + // Fallback to regular memcpy. + return memcpy(dst, src, len); +#endif // __SSE3__ || __aarch64__ || (_MSC_VER && __AVX__) + } + + inline void* non_temporal_store_memcpy_avx(void* __restrict dst, const void* __restrict src, size_t len) + { +#ifdef __AVX__ + uint8_t* d = reinterpret_cast(dst); + const uint8_t* s = reinterpret_cast(src); + + // memcpy() the misaligned header. At the end of this if block, is + // aligned to a 64-byte cacheline boundary or == 0. 
+ if (reinterpret_cast(d) & (kCacheLineSize - 1)) + { + uintptr_t bytes_before_alignment_boundary = + kCacheLineSize - + (reinterpret_cast(d) & (kCacheLineSize - 1)); + size_t header_len = (std::min)(bytes_before_alignment_boundary, len); + assert(bytes_before_alignment_boundary < kCacheLineSize); + memcpy(d, s, header_len); + d += header_len; + s += header_len; + len -= header_len; + } + + if (len >= kCacheLineSize) + { + _mm_sfence(); + __m256i* dst_cacheline = reinterpret_cast<__m256i*>(d); + const __m256i* src_cacheline = reinterpret_cast(s); + constexpr int kOpsPerCacheLine = kCacheLineSize / sizeof(__m256i); + size_t loops = len / kCacheLineSize; + + while (len >= kCacheLineSize) + { + __m256i temp1, temp2; + temp1 = _mm256_lddqu_si256(src_cacheline + 0); + temp2 = _mm256_lddqu_si256(src_cacheline + 1); + _mm256_stream_si256(dst_cacheline + 0, temp1); + _mm256_stream_si256(dst_cacheline + 1, temp2); + src_cacheline += kOpsPerCacheLine; + dst_cacheline += kOpsPerCacheLine; + len -= kCacheLineSize; + } + d += loops * kCacheLineSize; + s += loops * kCacheLineSize; + _mm_sfence(); + } + + // memcpy the tail. + if (len) + { + memcpy(d, s, len); + } + return dst; +#else + // Fallback to regular memcpy when AVX is not available. + return memcpy(dst, src, len); +#endif // __AVX__ + } + + } // namespace crc_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CRC_INTERNAL_NON_TEMPORAL_MEMCPY_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h b/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h new file mode 100644 index 00000000..51363253 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h @@ -0,0 +1,124 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: failure_signal_handler.h +// ----------------------------------------------------------------------------- +// +// This file configures the Abseil *failure signal handler* to capture and dump +// useful debugging information (such as a stacktrace) upon program failure. +// +// To use the failure signal handler, call `absl::InstallFailureSignalHandler()` +// very early in your program, usually in the first few lines of main(): +// +// int main(int argc, char** argv) { +// // Initialize the symbolizer to get a human-readable stack trace +// absl::InitializeSymbolizer(argv[0]); +// +// absl::FailureSignalHandlerOptions options; +// absl::InstallFailureSignalHandler(options); +// DoSomethingInteresting(); +// return 0; +// } +// +// Any program that raises a fatal signal (such as `SIGSEGV`, `SIGILL`, +// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUG`, and `SIGTRAP`) will call the +// installed failure signal handler and provide debugging information to stderr. +// +// Note that you should *not* install the Abseil failure signal handler more +// than once. You may, of course, have another (non-Abseil) failure signal +// handler installed (which would be triggered if Abseil's failure signal +// handler sets `call_previous_handler` to `true`). 
+ +#ifndef ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_ +#define ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // FailureSignalHandlerOptions + // + // Struct for holding `absl::InstallFailureSignalHandler()` configuration + // options. + struct FailureSignalHandlerOptions + { + // If true, try to symbolize the stacktrace emitted on failure, provided that + // you have initialized a symbolizer for that purpose. (See symbolize.h for + // more information.) + bool symbolize_stacktrace = true; + + // If true, try to run signal handlers on an alternate stack (if supported on + // the given platform). An alternate stack is useful for program crashes due + // to a stack overflow; by running on a alternate stack, the signal handler + // may run even when normal stack space has been exhausted. The downside of + // using an alternate stack is that extra memory for the alternate stack needs + // to be pre-allocated. + bool use_alternate_stack = true; + + // If positive, indicates the number of seconds after which the failure signal + // handler is invoked to abort the program. Setting such an alarm is useful in + // cases where the failure signal handler itself may become hung or + // deadlocked. + int alarm_on_failure_secs = 3; + + // If true, call the previously registered signal handler for the signal that + // was received (if one was registered) after the existing signal handler + // runs. This mechanism can be used to chain signal handlers together. + // + // If false, the signal is raised to the default handler for that signal + // (which normally terminates the program). + // + // IMPORTANT: If true, the chained fatal signal handlers must not try to + // recover from the fatal signal. Instead, they should terminate the program + // via some mechanism, like raising the default handler for the signal, or by + // calling `_exit()`. 
Note that the failure signal handler may put parts of + // the Abseil library into a state from which they cannot recover. + bool call_previous_handler = false; + + // If non-null, indicates a pointer to a callback function that will be called + // upon failure, with a string argument containing failure data. This function + // may be used as a hook to write failure data to a secondary location, such + // as a log file. This function will also be called with null data, as a hint + // to flush any buffered data before the program may be terminated. Consider + // flushing any buffered data in all calls to this function. + // + // Since this function runs within a signal handler, it should be + // async-signal-safe if possible. + // See http://man7.org/linux/man-pages/man7/signal-safety.7.html + void (*writerfn)(const char*) = nullptr; + }; + + // InstallFailureSignalHandler() + // + // Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`, + // `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUG`, and `SIGTRAP` (provided they exist + // on the given platform). The failure signal handler dumps program failure data + // useful for debugging in an unspecified format to stderr. This data may + // include the program counter, a stacktrace, and register information on some + // systems; do not rely on an exact format for the output, as it is subject to + // change. 
+ void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options); + + namespace debugging_internal + { + const char* FailureSignalToString(int signo); + } // namespace debugging_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h b/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h new file mode 100644 index 00000000..6c2403d4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_ +#define ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Return whether the byte at *addr is readable, without faulting. + // Save and restores errno. 
+ bool AddressIsReadable(const void* addr); + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h b/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h new file mode 100644 index 00000000..a0060687 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h @@ -0,0 +1,73 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An async-signal-safe and thread-safe demangler for Itanium C++ ABI +// (aka G++ V3 ABI). +// +// The demangler is implemented to be used in async signal handlers to +// symbolize stack traces. We cannot use libstdc++'s +// abi::__cxa_demangle() in such signal handlers since it's not async +// signal safe (it uses malloc() internally). +// +// Note that this demangler doesn't support full demangling. More +// specifically, it doesn't print types of function parameters and +// types of template arguments. It just skips them. However, it's +// still very useful to extract basic information such as class, +// function, constructor, destructor, and operator names. +// +// See the implementation note in demangle.cc if you are interested. 
+// +// Example: +// +// | Mangled Name | The Demangler | abi::__cxa_demangle() +// |---------------|---------------|----------------------- +// | _Z1fv | f() | f() +// | _Z1fi | f() | f(int) +// | _Z3foo3bar | foo() | foo(bar) +// | _Z1fIiEvi | f<>() | void f(int) +// | _ZN1N1fE | N::f | N::f +// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar() +// | _Zrm1XS_" | operator%() | operator%(X, X) +// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo() +// | _Z1fSs | f() | f(std::basic_string, +// | | | std::allocator >) +// +// See the unit test for more examples. +// +// Note: we might want to write demanglers for ABIs other than Itanium +// C++ ABI in the future. +// + +#ifndef ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_ +#define ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Demangle `mangled`. On success, return true and write the + // demangled symbol name to `out`. Otherwise, return false. + // `out` is modified even if demangling is unsuccessful. + bool Demangle(const char* mangled, char* out, size_t out_size); + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h b/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h new file mode 100644 index 00000000..aec184a2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h @@ -0,0 +1,147 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Allow dynamic symbol lookup for in-memory Elf images. + +#ifndef ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_ +#define ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_ + +// Including this will define the __GLIBC__ macro if glibc is being +// used. +#include + +#include "absl/base/config.h" + +// Maybe one day we can rewrite this file not to require the elf +// symbol extensions in glibc, but for right now we need them. +#ifdef ABSL_HAVE_ELF_MEM_IMAGE +#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set +#endif + +#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \ + !defined(__native_client__) && !defined(__asmjs__) && \ + !defined(__wasm__) && !defined(__HAIKU__) && !defined(__sun) && \ + !defined(__VXWORKS__) && !defined(__hexagon__) +#define ABSL_HAVE_ELF_MEM_IMAGE 1 +#endif + +#ifdef ABSL_HAVE_ELF_MEM_IMAGE + +#include // for ElfW + +#if defined(__FreeBSD__) && !defined(ElfW) +#define ElfW(x) __ElfN(x) +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // An in-memory ELF image (may not exist on disk). + class ElfMemImage + { + private: + // Sentinel: there could never be an elf image at &kInvalidBaseSentinel. + static const int kInvalidBaseSentinel; + + public: + // Sentinel: there could never be an elf image at this address. + static constexpr const void* const kInvalidBase = + static_cast(&kInvalidBaseSentinel); + + // Information about a single vdso symbol. + // All pointers are into .dynsym, .dynstr, or .text of the VDSO. + // Do not free() them or modify through them. 
+ struct SymbolInfo + { + const char* name; // E.g. "__vdso_getcpu" + const char* version; // E.g. "LINUX_2.6", could be "" + // for unversioned symbol. + const void* address; // Relocated symbol address. + const ElfW(Sym) * symbol; // Symbol in the dynamic symbol table. + }; + + // Supports iteration over all dynamic symbols. + class SymbolIterator + { + public: + friend class ElfMemImage; + const SymbolInfo* operator->() const; + const SymbolInfo& operator*() const; + SymbolIterator& operator++(); + bool operator!=(const SymbolIterator& rhs) const; + bool operator==(const SymbolIterator& rhs) const; + + private: + SymbolIterator(const void* const image, int index); + void Update(int incr); + SymbolInfo info_; + int index_; + const void* const image_; + }; + + explicit ElfMemImage(const void* base); + void Init(const void* base); + bool IsPresent() const + { + return ehdr_ != nullptr; + } + const ElfW(Phdr) * GetPhdr(int index) const; + const ElfW(Sym) * GetDynsym(int index) const; + const ElfW(Versym) * GetVersym(int index) const; + const ElfW(Verdef) * GetVerdef(int index) const; + const ElfW(Verdaux) * GetVerdefAux(const ElfW(Verdef) * verdef) const; + const char* GetDynstr(ElfW(Word) offset) const; + const void* GetSymAddr(const ElfW(Sym) * sym) const; + const char* GetVerstr(ElfW(Word) offset) const; + int GetNumSymbols() const; + + SymbolIterator begin() const; + SymbolIterator end() const; + + // Look up versioned dynamic symbol in the image. + // Returns false if image is not present, or doesn't contain given + // symbol/version/type combination. + // If info_out is non-null, additional details are filled in. + bool LookupSymbol(const char* name, const char* version, int symbol_type, SymbolInfo* info_out) const; + + // Find info about symbol (if any) which overlaps given address. + // Returns true if symbol was found; false if image isn't present + // or doesn't have a symbol overlapping given address. 
+ // If info_out is non-null, additional details are filled in. + bool LookupSymbolByAddress(const void* address, SymbolInfo* info_out) const; + + private: + const ElfW(Ehdr) * ehdr_; + const ElfW(Sym) * dynsym_; + const ElfW(Versym) * versym_; + const ElfW(Verdef) * verdef_; + const ElfW(Word) * hash_; + const char* dynstr_; + size_t strsize_; + size_t verdefnum_; + ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD). + }; + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HAVE_ELF_MEM_IMAGE + +#endif // ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h b/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h new file mode 100644 index 00000000..914290ea --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h @@ -0,0 +1,59 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_ +#define ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Type of function used for printing in stack trace dumping, etc. + // We avoid closures to keep things simple. 
+ typedef void OutputWriter(const char*, void*); + + // RegisterDebugStackTraceHook() allows to register a single routine + // `hook` that is called each time DumpStackTrace() is called. + // `hook` may be called from a signal handler. + typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth, OutputWriter* writer, void* writer_arg); + + // Registration of SymbolizeUrlEmitter for use inside of a signal handler. + // This is inherently unsafe and must be signal safe code. + void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook); + SymbolizeUrlEmitter GetDebugStackTraceHook(); + + // Returns the program counter from signal context, or nullptr if + // unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of + // ucontext_t on non-POSIX systems. + void* GetProgramCounter(void* const vuc); + + // Uses `writer` to dump the program counter, stack trace, and stack + // frame sizes. + void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], int frame_sizes[], int depth, int min_dropped_frames, bool symbolize_stacktrace, OutputWriter* writer, void* writer_arg); + + // Dump current stack trace omitting the topmost `min_dropped_frames` stack + // frames. + void DumpStackTrace(int min_dropped_frames, int max_num_frames, bool symbolize_stacktrace, OutputWriter* writer, void* writer_arg); + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h b/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h new file mode 100644 index 00000000..2afbeeef --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h @@ -0,0 +1,52 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Helper function for measuring stack consumption of signal handlers. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_ +#define ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_ + +#include "absl/base/config.h" + +// The code in this module is not portable. +// Use this feature test macro to detect its availability. +#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION +#error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly +#elif !defined(__APPLE__) && !defined(_WIN32) && \ + (defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \ + defined(__aarch64__) || defined(__riscv)) +#define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1 + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Returns the stack consumption in bytes for the code exercised by + // signal_handler. To measure stack consumption, signal_handler is registered + // as a signal handler, so the code that it exercises must be async-signal + // safe. The argument of signal_handler is an implementation detail of signal + // handlers and should ignored by the code for signal_handler. Use global + // variables to pass information between your test code and signal_handler. 
+ int GetSignalHandlerStackConsumption(void (*signal_handler)(int)); + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION + +#endif // ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_aarch64-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_aarch64-inl.inc new file mode 100644 index 00000000..3f087162 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_aarch64-inl.inc @@ -0,0 +1,242 @@ +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_ + +// Generate stack tracer for aarch64 + +#if defined(__linux__) +#include +#include +#include +#endif + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/internal/address_is_readable.h" +#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems +#include "absl/debugging/stacktrace.h" + +static const size_t kUnknownFrameSize = 0; +// Stack end to use when we don't know the actual stack end +// (effectively just the end of address space). +constexpr uintptr_t kUnknownStackEnd = + std::numeric_limits::max() - sizeof(void *); + +#if defined(__linux__) +// Returns the address of the VDSO __kernel_rt_sigreturn function, if present. 
+static const unsigned char* GetKernelRtSigreturnAddress() { + constexpr uintptr_t kImpossibleAddress = 1; + ABSL_CONST_INIT static std::atomic memoized{kImpossibleAddress}; + uintptr_t address = memoized.load(std::memory_order_relaxed); + if (address != kImpossibleAddress) { + return reinterpret_cast(address); + } + + address = reinterpret_cast(nullptr); + +#ifdef ABSL_HAVE_VDSO_SUPPORT + absl::debugging_internal::VDSOSupport vdso; + if (vdso.IsPresent()) { + absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info; + auto lookup = [&](int type) { + return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type, + &symbol_info); + }; + if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) || + symbol_info.address == nullptr) { + // Unexpected: VDSO is present, yet the expected symbol is missing + // or null. + assert(false && "VDSO is present, but doesn't have expected symbol"); + } else { + if (reinterpret_cast(symbol_info.address) != + kImpossibleAddress) { + address = reinterpret_cast(symbol_info.address); + } else { + assert(false && "VDSO returned invalid address"); + } + } + } +#endif + + memoized.store(address, std::memory_order_relaxed); + return reinterpret_cast(address); +} +#endif // __linux__ + +// Compute the size of a stack frame in [low..high). We assume that +// low < high. Return size of kUnknownFrameSize. +template +static inline size_t ComputeStackFrameSize(const T* low, + const T* high) { + const char* low_char_ptr = reinterpret_cast(low); + const char* high_char_ptr = reinterpret_cast(high); + return low < high ? static_cast(high_char_ptr - low_char_ptr) + : kUnknownFrameSize; +} + +// Given a pointer to a stack frame, locate and return the calling +// stackframe, or return null if no stackframe can be found. Perform sanity +// checks (the strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. 
+template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static void **NextStackFrame(void **old_frame_pointer, const void *uc, + size_t stack_low, size_t stack_high) { + void **new_frame_pointer = reinterpret_cast(*old_frame_pointer); + bool check_frame_size = true; + +#if defined(__linux__) + if (WITH_CONTEXT && uc != nullptr) { + // Check to see if next frame's return address is __kernel_rt_sigreturn. + if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) { + const ucontext_t *ucv = static_cast(uc); + // old_frame_pointer[0] is not suitable for unwinding, look at + // ucontext to discover frame pointer before signal. + void **const pre_signal_frame_pointer = + reinterpret_cast(ucv->uc_mcontext.regs[29]); + + // The most recent signal always needs special handling to find the frame + // pointer, but a nested signal does not. If pre_signal_frame_pointer is + // earlier in the stack than the old_frame_pointer, then use it. If it is + // later, then we have already unwound through it and it needs no special + // handling. + if (pre_signal_frame_pointer >= old_frame_pointer) { + new_frame_pointer = pre_signal_frame_pointer; + } + // Check that alleged frame pointer is actually readable. This is to + // prevent "double fault" in case we hit the first fault due to e.g. + // stack corruption. + if (!absl::debugging_internal::AddressIsReadable( + new_frame_pointer)) + return nullptr; + + // Skip frame size check if we return from a signal. We may be using a + // an alternate stack for signals. + check_frame_size = false; + } + } +#endif + + // The frame pointer should be 8-byte aligned. + if ((reinterpret_cast(new_frame_pointer) & 7) != 0) + return nullptr; + + // Check frame size. In strict mode, we assume frames to be under + // 100,000 bytes. In non-strict mode, we relax the limit to 1MB. + if (check_frame_size) { + const size_t max_size = STRICT_UNWINDING ? 
100000 : 1000000; + const size_t frame_size = + ComputeStackFrameSize(old_frame_pointer, new_frame_pointer); + if (frame_size == kUnknownFrameSize) + return nullptr; + // A very large frame may mean corrupt memory or an erroneous frame + // pointer. But also maybe just a plain-old large frame. Assume that if the + // frame is within the known stack, then it is valid. + if (frame_size > max_size) { + if (stack_high < kUnknownStackEnd && + static_cast(getpagesize()) < stack_low) { + const uintptr_t new_fp_u = + reinterpret_cast(new_frame_pointer); + // Stack bounds are known. + if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) { + // new_frame_pointer is not within the known stack. + return nullptr; + } + } else { + // Stack bounds are unknown, prefer truncated stack to possible crash. + return nullptr; + } + } + } + + return new_frame_pointer; +} + +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { +#ifdef __GNUC__ + void **frame_pointer = reinterpret_cast(__builtin_frame_address(0)); +#else +# error reading stack point not yet supported on this platform. +#endif + skip_count++; // Skip the frame for this function. + int n = 0; + + // Assume that the first page is not stack. + size_t stack_low = static_cast(getpagesize()); + size_t stack_high = kUnknownStackEnd; + + // The frame pointer points to low address of a frame. The first 64-bit + // word of a frame points to the next frame up the call chain, which normally + // is just after the high address of the current frame. The second word of + // a frame contains return address of to the caller. To find a pc value + // associated with the current frame, we need to go down a level in the call + // chain. So we remember return the address of the last frame seen. 
This + // does not work for the first stack frame, which belongs to UnwindImp() but + // we skip the frame for UnwindImp() anyway. + void* prev_return_address = nullptr; + // The nth frame size is the difference between the nth frame pointer and the + // the frame pointer below it in the call chain. There is no frame below the + // leaf frame, but this function is the leaf anyway, and we skip it. + void** prev_frame_pointer = nullptr; + + while (frame_pointer && n < max_depth) { + if (skip_count > 0) { + skip_count--; + } else { + result[n] = prev_return_address; + if (IS_STACK_FRAMES) { + sizes[n] = static_cast( + ComputeStackFrameSize(prev_frame_pointer, frame_pointer)); + } + n++; + } + prev_return_address = frame_pointer[1]; + prev_frame_pointer = frame_pointer; + // The absl::GetStackFrames routine is called when we are in some + // informational context (the failure signal handler for example). + // Use the non-strict unwinding rules to produce a stack trace + // that is as complete as possible (even if it contains a few bogus + // entries in some rare cases). + frame_pointer = NextStackFrame( + frame_pointer, ucp, stack_low, stack_high); + } + + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. 
+ const int kMaxUnwind = 200; + int num_dropped_frames = 0; + for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + frame_pointer = NextStackFrame( + frame_pointer, ucp, stack_low, stack_high); + } + *min_dropped_frames = num_dropped_frames; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_arm-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_arm-inl.inc new file mode 100644 index 00000000..102a2a12 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_arm-inl.inc @@ -0,0 +1,139 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This is inspired by Craig Silverstein's PowerPC stacktrace code. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_ + +#include + +#include "absl/debugging/stacktrace.h" + +// WARNING: +// This only works if all your code is in either ARM or THUMB mode. With +// interworking, the frame pointer of the caller can either be in r11 (ARM +// mode) or r7 (THUMB mode). 
A callee only saves the frame pointer of its +// mode in a fixed location on its stack frame. If the caller is a different +// mode, there is no easy way to find the frame pointer. It can either be +// still in the designated register or saved on stack along with other callee +// saved registers. + +// Given a pointer to a stack frame, locate and return the calling +// stackframe, or return nullptr if no stackframe can be found. Perform sanity +// checks (the strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +static void **NextStackFrame(void **old_sp) { + void **new_sp = (void**) old_sp[-1]; + + // Check that the transition from frame pointer old_sp to frame + // pointer new_sp isn't clearly bogus + if (STRICT_UNWINDING) { + // With the stack growing downwards, older stack frame must be + // at a greater address that the current one. + if (new_sp <= old_sp) return nullptr; + // Assume stack frames larger than 100,000 bytes are bogus. + if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr; + } else { + // In the non-strict mode, allow discontiguous stack frames. + // (alternate-signal-stacks for example). + if (new_sp == old_sp) return nullptr; + // And allow frames upto about 1MB. + if ((new_sp > old_sp) + && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr; + } + if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr; + return new_sp; +} + +// This ensures that absl::GetStackTrace sets up the Link Register properly. +#ifdef __GNUC__ +void StacktraceArmDummyFunction() __attribute__((noinline)); +void StacktraceArmDummyFunction() { __asm__ volatile(""); } +#else +# error StacktraceArmDummyFunction() needs to be ported to this platform. 
+#endif + +template +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void * /* ucp */, int *min_dropped_frames) { +#ifdef __GNUC__ + void **sp = reinterpret_cast(__builtin_frame_address(0)); +#else +# error reading stack point not yet supported on this platform. +#endif + + // On ARM, the return address is stored in the link register (r14). + // This is not saved on the stack frame of a leaf function. To + // simplify code that reads return addresses, we call a dummy + // function so that the return address of this function is also + // stored in the stack frame. This works at least for gcc. + StacktraceArmDummyFunction(); + + int n = 0; + while (sp && n < max_depth) { + // The absl::GetStackFrames routine is called when we are in some + // informational context (the failure signal handler for example). + // Use the non-strict unwinding rules to produce a stack trace + // that is as complete as possible (even if it contains a few bogus + // entries in some rare cases). + void **next_sp = NextStackFrame(sp); + + if (skip_count > 0) { + skip_count--; + } else { + result[n] = *sp; + + if (IS_STACK_FRAMES) { + if (next_sp > sp) { + sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp; + } else { + // A frame-size of 0 is used to indicate unknown frame size. + sizes[n] = 0; + } + } + n++; + } + sp = next_sp; + } + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. 
+ const int kMaxUnwind = 200; + int num_dropped_frames = 0; + for (int j = 0; sp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + sp = NextStackFrame(sp); + } + *min_dropped_frames = num_dropped_frames; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return false; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h new file mode 100644 index 00000000..5f0ff7a3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h @@ -0,0 +1,88 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + + * Defines ABSL_STACKTRACE_INL_HEADER to the *-inl.h containing + * actual unwinder implementation. + * This header is "private" to stacktrace.cc. + * DO NOT include it into any other files. 
+*/ +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ + +#include "absl/base/config.h" + +#if defined(ABSL_STACKTRACE_INL_HEADER) +#error ABSL_STACKTRACE_INL_HEADER cannot be directly set + +#elif defined(_WIN32) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_win32-inl.inc" + +#elif defined(__APPLE__) +#ifdef ABSL_HAVE_THREAD_LOCAL +// Thread local support required for UnwindImpl. +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_generic-inl.inc" +#endif // defined(ABSL_HAVE_THREAD_LOCAL) + +// Emscripten stacktraces rely on JS. Do not use them in standalone mode. +#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_emscripten-inl.inc" + +#elif defined(__linux__) && !defined(__ANDROID__) + +#if defined(NO_FRAME_POINTER) && \ + (defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)) +// Note: The libunwind-based implementation is not available to open-source +// users. +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_libunwind-inl.inc" +#define STACKTRACE_USES_LIBUNWIND 1 +#elif defined(NO_FRAME_POINTER) && defined(__has_include) +#if __has_include() +// Note: When using glibc this may require -funwind-tables to function properly. 
+#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_generic-inl.inc" +#endif // __has_include() +#elif defined(__i386__) || defined(__x86_64__) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_x86-inl.inc" +#elif defined(__ppc__) || defined(__PPC__) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_powerpc-inl.inc" +#elif defined(__aarch64__) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_aarch64-inl.inc" +#elif defined(__riscv) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_riscv-inl.inc" +#elif defined(__has_include) +#if __has_include() +// Note: When using glibc this may require -funwind-tables to function properly. +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_generic-inl.inc" +#endif // __has_include() +#endif // defined(__has_include) + +#endif // defined(__linux__) && !defined(__ANDROID__) + +// Fallback to the empty implementation. +#if !defined(ABSL_STACKTRACE_INL_HEADER) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_unimplemented-inl.inc" +#endif + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_emscripten-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_emscripten-inl.inc new file mode 100644 index 00000000..0f444514 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_emscripten-inl.inc @@ -0,0 +1,110 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Portable implementation - just use glibc +// +// Note: The glibc implementation may cause a call to malloc. +// This can cause a deadlock in HeapProfiler. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ + +#include + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/stacktrace.h" + +extern "C" { +uintptr_t emscripten_stack_snapshot(); +uint32_t emscripten_stack_unwind_buffer(uintptr_t pc, void *buffer, + uint32_t depth); +} + +// Sometimes, we can try to get a stack trace from within a stack +// trace, which can cause a self-deadlock. +// Protect against such reentrant call by failing to get a stack trace. +// +// We use __thread here because the code here is extremely low level -- it is +// called while collecting stack traces from within malloc and mmap, and thus +// can not call anything which might call malloc or mmap itself. +static __thread int recursive = 0; + +// The stack trace function might be invoked very early in the program's +// execution (e.g. from the very first malloc). +// As such, we suppress usage of backtrace during this early stage of execution. +static std::atomic disable_stacktraces(true); // Disabled until healthy. +// Waiting until static initializers run seems to be late enough. +// This file is included into stacktrace.cc so this will only run once. +ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() { + // Check if we can even create stacktraces. 
If not, bail early and leave + // disable_stacktraces set as-is. + // clang-format off + if (!EM_ASM_INT({ return (typeof wasmOffsetConverter !== 'undefined'); })) { + return 0; + } + // clang-format on + disable_stacktraces.store(false, std::memory_order_relaxed); + return 0; +}(); + +template +static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) { + return 0; + } + ++recursive; + + static_cast(ucp); // Unused. + constexpr int kStackLength = 64; + void *stack[kStackLength]; + + int size; + uintptr_t pc = emscripten_stack_snapshot(); + size = emscripten_stack_unwind_buffer(pc, stack, kStackLength); + + int result_count = size - skip_count; + if (result_count < 0) result_count = 0; + if (result_count > max_depth) result_count = max_depth; + for (int i = 0; i < result_count; i++) result[i] = stack[i + skip_count]; + + if (IS_STACK_FRAMES) { + // No implementation for finding out the stack frame sizes yet. + memset(sizes, 0, sizeof(*sizes) * result_count); + } + if (min_dropped_frames != nullptr) { + if (size - skip_count - max_depth > 0) { + *min_dropped_frames = size - skip_count - max_depth; + } else { + *min_dropped_frames = 0; + } + } + + --recursive; + + return result_count; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { return true; } +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_generic-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_generic-inl.inc new file mode 100644 index 00000000..5fa169a7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_generic-inl.inc @@ -0,0 +1,108 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Portable implementation - just use glibc +// +// Note: The glibc implementation may cause a call to malloc. +// This can cause a deadlock in HeapProfiler. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ + +#include +#include +#include + +#include "absl/debugging/stacktrace.h" +#include "absl/base/attributes.h" + +// Sometimes, we can try to get a stack trace from within a stack +// trace, because we don't block signals inside this code (which would be too +// expensive: the two extra system calls per stack trace do matter here). +// That can cause a self-deadlock. +// Protect against such reentrant call by failing to get a stack trace. +// +// We use __thread here because the code here is extremely low level -- it is +// called while collecting stack traces from within malloc and mmap, and thus +// can not call anything which might call malloc or mmap itself. +static __thread int recursive = 0; + +// The stack trace function might be invoked very early in the program's +// execution (e.g. from the very first malloc if using tcmalloc). Also, the +// glibc implementation itself will trigger malloc the first time it is called. +// As such, we suppress usage of backtrace during this early stage of execution. +static std::atomic disable_stacktraces(true); // Disabled until healthy. 
+// Waiting until static initializers run seems to be late enough. +// This file is included into stacktrace.cc so this will only run once. +ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() { + void* unused_stack[1]; + // Force the first backtrace to happen early to get the one-time shared lib + // loading (allocation) out of the way. After the first call it is much safer + // to use backtrace from a signal handler if we crash somewhere later. + backtrace(unused_stack, 1); + disable_stacktraces.store(false, std::memory_order_relaxed); + return 0; +}(); + +template +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) { + return 0; + } + ++recursive; + + static_cast(ucp); // Unused. + static const int kStackLength = 64; + void * stack[kStackLength]; + int size; + + size = backtrace(stack, kStackLength); + skip_count++; // we want to skip the current frame as well + int result_count = size - skip_count; + if (result_count < 0) + result_count = 0; + if (result_count > max_depth) + result_count = max_depth; + for (int i = 0; i < result_count; i++) + result[i] = stack[i + skip_count]; + + if (IS_STACK_FRAMES) { + // No implementation for finding out the stack frame sizes yet. 
+ memset(sizes, 0, sizeof(*sizes) * static_cast(result_count)); + } + if (min_dropped_frames != nullptr) { + if (size - skip_count - max_depth > 0) { + *min_dropped_frames = size - skip_count - max_depth; + } else { + *min_dropped_frames = 0; + } + } + + --recursive; + + return result_count; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_powerpc-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_powerpc-inl.inc new file mode 100644 index 00000000..a49ed2f7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_powerpc-inl.inc @@ -0,0 +1,258 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Produce stack trace. I'm guessing (hoping!) the code is much like +// for x86. 
For apple machines, at least, it seems to be; see +// https://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html +// https://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK +// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882 + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_ + +#if defined(__linux__) +#include // for PT_NIP. +#include // for ucontext_t +#endif + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/debugging/stacktrace.h" +#include "absl/debugging/internal/address_is_readable.h" +#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems + +// Given a stack pointer, return the saved link register value. +// Note that this is the link register for a callee. +static inline void *StacktracePowerPCGetLR(void **sp) { + // PowerPC has 3 main ABIs, which say where in the stack the + // Link Register is. For DARWIN and AIX (used by apple and + // linux ppc64), it's in sp[2]. For SYSV (used by linux ppc), + // it's in sp[1]. +#if defined(_CALL_AIX) || defined(_CALL_DARWIN) + return *(sp+2); +#elif defined(_CALL_SYSV) + return *(sp+1); +#elif defined(__APPLE__) || defined(__FreeBSD__) || \ + (defined(__linux__) && defined(__PPC64__)) + // This check is in case the compiler doesn't define _CALL_AIX/etc. + return *(sp+2); +#elif defined(__linux) + // This check is in case the compiler doesn't define _CALL_SYSV. + return *(sp+1); +#else +#error Need to specify the PPC ABI for your architecture. +#endif +} + +// Given a pointer to a stack frame, locate and return the calling +// stackframe, or return null if no stackframe can be found. 
Perform sanity +// checks (the strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static void **NextStackFrame(void **old_sp, const void *uc) { + void **new_sp = (void **) *old_sp; + enum { kStackAlignment = 16 }; + + // Check that the transition from frame pointer old_sp to frame + // pointer new_sp isn't clearly bogus + if (STRICT_UNWINDING) { + // With the stack growing downwards, older stack frame must be + // at a greater address that the current one. + if (new_sp <= old_sp) return nullptr; + // Assume stack frames larger than 100,000 bytes are bogus. + if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr; + } else { + // In the non-strict mode, allow discontiguous stack frames. + // (alternate-signal-stacks for example). + if (new_sp == old_sp) return nullptr; + // And allow frames upto about 1MB. + if ((new_sp > old_sp) + && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr; + } + if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr; + +#if defined(__linux__) + enum StackTraceKernelSymbolStatus { + kNotInitialized = 0, kAddressValid, kAddressInvalid }; + + if (IS_WITH_CONTEXT && uc != nullptr) { + static StackTraceKernelSymbolStatus kernel_symbol_status = + kNotInitialized; // Sentinel: not computed yet. + // Initialize with sentinel value: __kernel_rt_sigtramp_rt64 can not + // possibly be there. 
+ static const unsigned char *kernel_sigtramp_rt64_address = nullptr; + if (kernel_symbol_status == kNotInitialized) { + absl::debugging_internal::VDSOSupport vdso; + if (vdso.IsPresent()) { + absl::debugging_internal::VDSOSupport::SymbolInfo + sigtramp_rt64_symbol_info; + if (!vdso.LookupSymbol( + "__kernel_sigtramp_rt64", "LINUX_2.6.15", + absl::debugging_internal::VDSOSupport::kVDSOSymbolType, + &sigtramp_rt64_symbol_info) || + sigtramp_rt64_symbol_info.address == nullptr) { + // Unexpected: VDSO is present, yet the expected symbol is missing + // or null. + assert(false && "VDSO is present, but doesn't have expected symbol"); + kernel_symbol_status = kAddressInvalid; + } else { + kernel_sigtramp_rt64_address = + reinterpret_cast( + sigtramp_rt64_symbol_info.address); + kernel_symbol_status = kAddressValid; + } + } else { + kernel_symbol_status = kAddressInvalid; + } + } + + if (new_sp != nullptr && + kernel_symbol_status == kAddressValid && + StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) { + const ucontext_t* signal_context = + reinterpret_cast(uc); + void **const sp_before_signal = +#if defined(__PPC64__) + reinterpret_cast(signal_context->uc_mcontext.gp_regs[PT_R1]); +#else + reinterpret_cast( + signal_context->uc_mcontext.uc_regs->gregs[PT_R1]); +#endif + // Check that alleged sp before signal is nonnull and is reasonably + // aligned. + if (sp_before_signal != nullptr && + ((uintptr_t)sp_before_signal % kStackAlignment) == 0) { + // Check that alleged stack pointer is actually readable. This is to + // prevent a "double fault" in case we hit the first fault due to e.g. + // a stack corruption. + if (absl::debugging_internal::AddressIsReadable(sp_before_signal)) { + // Alleged stack pointer is readable, use it for further unwinding. + new_sp = sp_before_signal; + } + } + } + } +#endif + + return new_sp; +} + +// This ensures that absl::GetStackTrace sets up the Link Register properly. 
+ABSL_ATTRIBUTE_NOINLINE static void AbslStacktracePowerPCDummyFunction() { + ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); +} + +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + void **sp; + // Apple macOS uses an old version of gnu as -- both Darwin 7.9.0 (Panther) + // and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a + // different asm syntax. I don't know quite the best way to discriminate + // systems using the old as from the new one; I've gone with __APPLE__. +#ifdef __APPLE__ + __asm__ volatile ("mr %0,r1" : "=r" (sp)); +#else + __asm__ volatile ("mr %0,1" : "=r" (sp)); +#endif + + // On PowerPC, the "Link Register" or "Link Record" (LR), is a stack + // entry that holds the return address of the subroutine call (what + // instruction we run after our function finishes). This is the + // same as the stack-pointer of our parent routine, which is what we + // want here. While the compiler will always(?) set up LR for + // subroutine calls, it may not for leaf functions (such as this one). + // This routine forces the compiler (at least gcc) to push it anyway. + AbslStacktracePowerPCDummyFunction(); + + // The LR save area is used by the callee, so the top entry is bogus. + skip_count++; + + int n = 0; + + // Unlike ABIs of X86 and ARM, PowerPC ABIs say that return address (in + // the link register) of a function call is stored in the caller's stack + // frame instead of the callee's. When we look for the return address + // associated with a stack frame, we need to make sure that there is a + // caller frame before it. So we call NextStackFrame before entering the + // loop below and check next_sp instead of sp for loop termination. 
+ // The outermost frame is set up by runtimes and it does not have a + // caller frame, so it is skipped. + + // The absl::GetStackFrames routine is called when we are in some + // informational context (the failure signal handler for example). + // Use the non-strict unwinding rules to produce a stack trace + // that is as complete as possible (even if it contains a few + // bogus entries in some rare cases). + void **next_sp = NextStackFrame(sp, ucp); + + while (next_sp && n < max_depth) { + if (skip_count > 0) { + skip_count--; + } else { + result[n] = StacktracePowerPCGetLR(sp); + if (IS_STACK_FRAMES) { + if (next_sp > sp) { + sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp; + } else { + // A frame-size of 0 is used to indicate unknown frame size. + sizes[n] = 0; + } + } + n++; + } + + sp = next_sp; + next_sp = NextStackFrame(sp, ucp); + } + + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. 
+ const int kMaxUnwind = 1000; + int num_dropped_frames = 0; + for (int j = 0; next_sp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + next_sp = NextStackFrame(next_sp, ucp); + } + *min_dropped_frames = num_dropped_frames; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_riscv-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_riscv-inl.inc new file mode 100644 index 00000000..20183fa3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_riscv-inl.inc @@ -0,0 +1,191 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_ + +// Generate stack trace for riscv + +#include + +#include "absl/base/config.h" +#if defined(__linux__) +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/stacktrace.h" + +static const uintptr_t kUnknownFrameSize = 0; + +// Compute the size of a stack frame in [low..high). 
We assume that low < high. +// Return size of kUnknownFrameSize. +template +static inline uintptr_t ComputeStackFrameSize(const T *low, const T *high) { + const char *low_char_ptr = reinterpret_cast(low); + const char *high_char_ptr = reinterpret_cast(high); + return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize; +} + +// Given a pointer to a stack frame, locate and return the calling stackframe, +// or return null if no stackframe can be found. Perform sanity checks (the +// strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static void ** NextStackFrame(void **old_frame_pointer, const void *uc, + const std::pair range) { + // . + // . + // . + // +-> +----------------+ + // | | return address | + // | | previous fp | + // | | ... | + // | +----------------+ <-+ + // | | return address | | + // +---|- previous fp | | + // | ... | | + // $fp ->|----------------+ | + // | return address | | + // | previous fp -|---+ + // $sp ->| ... | + // +----------------+ + void **new_frame_pointer = reinterpret_cast(old_frame_pointer[-2]); + uintptr_t frame_pointer = reinterpret_cast(new_frame_pointer); + + // The RISCV ELF psABI mandates that the stack pointer is always 16-byte + // aligned. + // TODO(#1236) this doesn't hold for ILP32E which only mandates a 4-byte + // alignment. + if (frame_pointer & 15) + return nullptr; + + // If the new frame pointer matches the signal context, avoid terminating + // early to deal with alternate signal stacks. + if (WITH_CONTEXT) + if (const ucontext_t *ucv = static_cast(uc)) + // RISCV ELF psABI has the frame pointer at x8/fp/s0. + // -- RISCV psABI Table 18.2 + if (ucv->uc_mcontext.__gregs[8] == frame_pointer) + return new_frame_pointer; + + // Check frame size. 
In strict mode, we assume frames to be under 100,000 + // bytes. In non-strict mode, we relax the limit to 1MB. + const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000; + const uintptr_t frame_size = + ComputeStackFrameSize(old_frame_pointer, new_frame_pointer); + if (frame_size == kUnknownFrameSize) { + if (STRICT_UNWINDING) + return nullptr; + + // In non-strict mode permit non-contiguous stacks (e.g. alternate signal + // frame handling). + if (reinterpret_cast(new_frame_pointer) < range.first || + reinterpret_cast(new_frame_pointer) > range.second) + return nullptr; + } + + if (frame_size > max_size) + return nullptr; + + return new_frame_pointer; +} + +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + // The `frame_pointer` that is computed here points to the top of the frame. + // The two words preceding the address are the return address and the previous + // frame pointer. +#if defined(__GNUC__) + void **frame_pointer = reinterpret_cast(__builtin_frame_address(0)); +#else +#error reading stack pointer not yet supported on this platform +#endif + + std::pair stack = { + // assume that the first page is not the stack. + static_cast(sysconf(_SC_PAGESIZE)), + std::numeric_limits::max() - sizeof(void *) + }; + + int n = 0; + void *return_address = nullptr; + while (frame_pointer && n < max_depth) { + return_address = frame_pointer[-1]; + + // The absl::GetStackFrames routine is called when we are in some + // informational context (the failure signal handler for example). Use the + // non-strict unwinding rules to produce a stack trace that is as complete + // as possible (even if it contains a few bogus entries in some rare cases). 
+ void **next_frame_pointer = + NextStackFrame(frame_pointer, ucp, + stack); + + if (skip_count > 0) { + skip_count--; + } else { + result[n] = return_address; + if (IS_STACK_FRAMES) { + sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer); + } + n++; + } + + frame_pointer = next_frame_pointer; + } + + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. + const int kMaxUnwind = 200; + int num_dropped_frames = 0; + for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + frame_pointer = + NextStackFrame(frame_pointer, ucp, + stack); + } + *min_dropped_frames = num_dropped_frames; + } + + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { return true; } +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_unimplemented-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_unimplemented-inl.inc new file mode 100644 index 00000000..5b8fb191 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_unimplemented-inl.inc @@ -0,0 +1,24 @@ +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_ + +template +static int UnwindImpl(void** /* result */, int* /* sizes */, + int /* max_depth */, int /* skip_count */, + const void* /* ucp */, int *min_dropped_frames) { + if (min_dropped_frames != nullptr) { + *min_dropped_frames = 0; + } + return 0; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return false; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // 
ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_win32-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_win32-inl.inc new file mode 100644 index 00000000..ef2b973e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_win32-inl.inc @@ -0,0 +1,94 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Produces a stack trace for Windows. Normally, one could use +// stacktrace_x86-inl.h or stacktrace_x86_64-inl.h -- and indeed, that +// should work for binaries compiled using MSVC in "debug" mode. +// However, in "release" mode, Windows uses frame-pointer +// optimization, which makes getting a stack trace very difficult. +// +// There are several approaches one can take. One is to use Windows +// intrinsics like StackWalk64. These can work, but have restrictions +// on how successful they can be. Another attempt is to write a +// version of stacktrace_x86-inl.h that has heuristic support for +// dealing with FPO, similar to what WinDbg does (see +// http://www.nynaeve.net/?p=97). There are (non-working) examples of +// these approaches, complete with TODOs, in stacktrace_win32-inl.h#1 +// +// The solution we've ended up doing is to call the undocumented +// windows function RtlCaptureStackBackTrace, which probably doesn't +// work with FPO but at least is fast, and doesn't require a symbol +// server. 
+// +// This code is inspired by a patch from David Vitek: +// https://code.google.com/p/google-perftools/issues/detail?id=83 + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_ + +#include // for GetProcAddress and GetModuleHandle +#include + +typedef USHORT NTAPI RtlCaptureStackBackTrace_Function( + IN ULONG frames_to_skip, + IN ULONG frames_to_capture, + OUT PVOID *backtrace, + OUT PULONG backtrace_hash); + +// It is not possible to load RtlCaptureStackBackTrace at static init time in +// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace +#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ + !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = + &::CaptureStackBackTrace; +#else +// Load the function we need at static init time, where we don't have +// to worry about someone else holding the loader's lock. +static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = + (RtlCaptureStackBackTrace_Function*)GetProcAddress( + GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace"); +#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP + +template +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void*, int* min_dropped_frames) { + USHORT n = 0; + if (!RtlCaptureStackBackTrace_fn || skip_count < 0 || max_depth < 0) { + // can't get a stacktrace with no function/invalid args + } else { + n = RtlCaptureStackBackTrace_fn(static_cast(skip_count) + 2, + static_cast(max_depth), result, 0); + } + if (IS_STACK_FRAMES) { + // No implementation for finding out the stack frame sizes yet. + memset(sizes, 0, sizeof(*sizes) * n); + } + if (min_dropped_frames != nullptr) { + // Not implemented. 
+ *min_dropped_frames = 0; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return false; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_x86-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_x86-inl.inc new file mode 100644 index 00000000..1975ba74 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_x86-inl.inc @@ -0,0 +1,394 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Produce stack trace + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_ + +#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__)) +#include // for ucontext_t +#endif + +#if !defined(_WIN32) +#include +#endif + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/debugging/internal/address_is_readable.h" +#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems +#include "absl/debugging/stacktrace.h" + +using absl::debugging_internal::AddressIsReadable; + +#if defined(__linux__) && defined(__i386__) +// Count "push %reg" instructions in VDSO __kernel_vsyscall(), +// preceding "syscall" or "sysenter". +// If __kernel_vsyscall uses frame pointer, answer 0. +// +// kMaxBytes tells how many instruction bytes of __kernel_vsyscall +// to analyze before giving up. Up to kMaxBytes+1 bytes of +// instructions could be accessed. +// +// Here are known __kernel_vsyscall instruction sequences: +// +// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S). +// Used on Intel. +// 0xffffe400 <__kernel_vsyscall+0>: push %ecx +// 0xffffe401 <__kernel_vsyscall+1>: push %edx +// 0xffffe402 <__kernel_vsyscall+2>: push %ebp +// 0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp +// 0xffffe405 <__kernel_vsyscall+5>: sysenter +// +// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S). +// Used on AMD. +// 0xffffe400 <__kernel_vsyscall+0>: push %ebp +// 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp +// 0xffffe403 <__kernel_vsyscall+3>: syscall +// + +// The sequence below isn't actually expected in Google fleet, +// here only for completeness. Remove this comment from OSS release. 
+ +// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S) +// 0xffffe400 <__kernel_vsyscall+0>: int $0x80 +// 0xffffe401 <__kernel_vsyscall+1>: ret +// +static const int kMaxBytes = 10; + +// We use assert()s instead of DCHECK()s -- this is too low level +// for DCHECK(). + +static int CountPushInstructions(const unsigned char *const addr) { + int result = 0; + for (int i = 0; i < kMaxBytes; ++i) { + if (addr[i] == 0x89) { + // "mov reg,reg" + if (addr[i + 1] == 0xE5) { + // Found "mov %esp,%ebp". + return 0; + } + ++i; // Skip register encoding byte. + } else if (addr[i] == 0x0F && + (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) { + // Found "sysenter" or "syscall". + return result; + } else if ((addr[i] & 0xF0) == 0x50) { + // Found "push %reg". + ++result; + } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) { + // Found "int $0x80" + assert(result == 0); + return 0; + } else { + // Unexpected instruction. + assert(false && "unexpected instruction in __kernel_vsyscall"); + return 0; + } + } + // Unexpected: didn't find SYSENTER or SYSCALL in + // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval. + assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall"); + return 0; +} +#endif + +// Assume stack frames larger than 100,000 bytes are bogus. +static const int kMaxFrameBytes = 100000; +// Stack end to use when we don't know the actual stack end +// (effectively just the end of address space). +constexpr uintptr_t kUnknownStackEnd = + std::numeric_limits::max() - sizeof(void *); + +// Returns the stack frame pointer from signal context, 0 if unknown. +// vuc is a ucontext_t *. We use void* to avoid the use +// of ucontext_t on non-POSIX systems. +static uintptr_t GetFP(const void *vuc) { +#if !defined(__linux__) + static_cast(vuc); // Avoid an unused argument compiler warning. 
+#else + if (vuc != nullptr) { + auto *uc = reinterpret_cast(vuc); +#if defined(__i386__) + const auto bp = uc->uc_mcontext.gregs[REG_EBP]; + const auto sp = uc->uc_mcontext.gregs[REG_ESP]; +#elif defined(__x86_64__) + const auto bp = uc->uc_mcontext.gregs[REG_RBP]; + const auto sp = uc->uc_mcontext.gregs[REG_RSP]; +#else + const uintptr_t bp = 0; + const uintptr_t sp = 0; +#endif + // Sanity-check that the base pointer is valid. It's possible that some + // code in the process is compiled with --copt=-fomit-frame-pointer or + // --copt=-momit-leaf-frame-pointer. + // + // TODO(bcmills): -momit-leaf-frame-pointer is currently the default + // behavior when building with clang. Talk to the C++ toolchain team about + // fixing that. + if (bp >= sp && bp - sp <= kMaxFrameBytes) + return static_cast(bp); + + // If bp isn't a plausible frame pointer, return the stack pointer instead. + // If we're lucky, it points to the start of a stack frame; otherwise, we'll + // get one frame of garbage in the stack trace and fail the sanity check on + // the next iteration. + return static_cast(sp); + } +#endif + return 0; +} + +// Given a pointer to a stack frame, locate and return the calling +// stackframe, or return null if no stackframe can be found. Perform sanity +// checks (the strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static void **NextStackFrame(void **old_fp, const void *uc, + size_t stack_low, size_t stack_high) { + void **new_fp = (void **)*old_fp; + +#if defined(__linux__) && defined(__i386__) + if (WITH_CONTEXT && uc != nullptr) { + // How many "push %reg" instructions are there at __kernel_vsyscall? + // This is constant for a given kernel and processor, so compute + // it only once. 
+ static int num_push_instructions = -1; // Sentinel: not computed yet. + // Initialize with sentinel value: __kernel_rt_sigreturn can not possibly + // be there. + static const unsigned char *kernel_rt_sigreturn_address = nullptr; + static const unsigned char *kernel_vsyscall_address = nullptr; + if (num_push_instructions == -1) { +#ifdef ABSL_HAVE_VDSO_SUPPORT + absl::debugging_internal::VDSOSupport vdso; + if (vdso.IsPresent()) { + absl::debugging_internal::VDSOSupport::SymbolInfo + rt_sigreturn_symbol_info; + absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info; + if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC, + &rt_sigreturn_symbol_info) || + !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC, + &vsyscall_symbol_info) || + rt_sigreturn_symbol_info.address == nullptr || + vsyscall_symbol_info.address == nullptr) { + // Unexpected: 32-bit VDSO is present, yet one of the expected + // symbols is missing or null. + assert(false && "VDSO is present, but doesn't have expected symbols"); + num_push_instructions = 0; + } else { + kernel_rt_sigreturn_address = + reinterpret_cast( + rt_sigreturn_symbol_info.address); + kernel_vsyscall_address = + reinterpret_cast( + vsyscall_symbol_info.address); + num_push_instructions = + CountPushInstructions(kernel_vsyscall_address); + } + } else { + num_push_instructions = 0; + } +#else // ABSL_HAVE_VDSO_SUPPORT + num_push_instructions = 0; +#endif // ABSL_HAVE_VDSO_SUPPORT + } + if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr && + old_fp[1] == kernel_rt_sigreturn_address) { + const ucontext_t *ucv = static_cast(uc); + // This kernel does not use frame pointer in its VDSO code, + // and so %ebp is not suitable for unwinding. 
+ void **const reg_ebp = + reinterpret_cast(ucv->uc_mcontext.gregs[REG_EBP]); + const unsigned char *const reg_eip = + reinterpret_cast(ucv->uc_mcontext.gregs[REG_EIP]); + if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip && + reg_eip - kernel_vsyscall_address < kMaxBytes) { + // We "stepped up" to __kernel_vsyscall, but %ebp is not usable. + // Restore from 'ucv' instead. + void **const reg_esp = + reinterpret_cast(ucv->uc_mcontext.gregs[REG_ESP]); + // Check that alleged %esp is not null and is reasonably aligned. + if (reg_esp && + ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) { + // Check that alleged %esp is actually readable. This is to prevent + // "double fault" in case we hit the first fault due to e.g. stack + // corruption. + void *const reg_esp2 = reg_esp[num_push_instructions - 1]; + if (AddressIsReadable(reg_esp2)) { + // Alleged %esp is readable, use it for further unwinding. + new_fp = reinterpret_cast(reg_esp2); + } + } + } + } + } +#endif + + const uintptr_t old_fp_u = reinterpret_cast(old_fp); + const uintptr_t new_fp_u = reinterpret_cast(new_fp); + + // Check that the transition from frame pointer old_fp to frame + // pointer new_fp isn't clearly bogus. Skip the checks if new_fp + // matches the signal context, so that we don't skip out early when + // using an alternate signal stack. + // + // TODO(bcmills): The GetFP call should be completely unnecessary when + // ENABLE_COMBINED_UNWINDER is set (because we should be back in the thread's + // stack by this point), but it is empirically still needed (e.g. when the + // stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some + // frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what + // it's supposed to. + if (STRICT_UNWINDING && + (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) { + // With the stack growing downwards, older stack frame must be + // at a greater address that the current one. 
+ if (new_fp_u <= old_fp_u) return nullptr; + + // If we get a very large frame size, it may be an indication that we + // guessed frame pointers incorrectly and now risk a paging fault + // dereferencing a wrong frame pointer. Or maybe not because large frames + // are possible as well. The main stack is assumed to be readable, + // so we assume the large frame is legit if we know the real stack bounds + // and are within the stack. + if (new_fp_u - old_fp_u > kMaxFrameBytes) { + if (stack_high < kUnknownStackEnd && + static_cast(getpagesize()) < stack_low) { + // Stack bounds are known. + if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) { + // new_fp_u is not within the known stack. + return nullptr; + } + } else { + // Stack bounds are unknown, prefer truncated stack to possible crash. + return nullptr; + } + } + if (stack_low < old_fp_u && old_fp_u <= stack_high) { + // Old BP was in the expected stack region... + if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) { + // ... but new BP is outside of expected stack region. + // It is most likely bogus. + return nullptr; + } + } else { + // We may be here if we are executing in a co-routine with a + // separate stack. We can't do safety checks in this case. + } + } else { + if (new_fp == nullptr) return nullptr; // skip AddressIsReadable() below + // In the non-strict mode, allow discontiguous stack frames. + // (alternate-signal-stacks for example). + if (new_fp == old_fp) return nullptr; + } + + if (new_fp_u & (sizeof(void *) - 1)) return nullptr; +#ifdef __i386__ + // On 32-bit machines, the stack pointer can be very close to + // 0xffffffff, so we explicitly check for a pointer into the + // last two pages in the address space + if (new_fp_u >= 0xffffe000) return nullptr; +#endif +#if !defined(_WIN32) + if (!STRICT_UNWINDING) { + // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test + // on AMD-based machines with VDSO-enabled kernels. 
+ // Make an extra sanity check to insure new_fp is readable. + // Note: NextStackFrame() is only called while the program + // is already on its last leg, so it's ok to be slow here. + + if (!AddressIsReadable(new_fp)) { + return nullptr; + } + } +#endif + return new_fp; +} + +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +ABSL_ATTRIBUTE_NOINLINE +static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + int n = 0; + void **fp = reinterpret_cast(__builtin_frame_address(0)); + + // Assume that the first page is not stack. + size_t stack_low = static_cast(getpagesize()); + size_t stack_high = kUnknownStackEnd; + + while (fp && n < max_depth) { + if (*(fp + 1) == reinterpret_cast(0)) { + // In 64-bit code, we often see a frame that + // points to itself and has a return address of 0. + break; + } + void **next_fp = NextStackFrame( + fp, ucp, stack_low, stack_high); + if (skip_count > 0) { + skip_count--; + } else { + result[n] = *(fp + 1); + if (IS_STACK_FRAMES) { + if (next_fp > fp) { + sizes[n] = static_cast( + reinterpret_cast(next_fp) - + reinterpret_cast(fp)); + } else { + // A frame-size of 0 is used to indicate unknown frame size. + sizes[n] = 0; + } + } + n++; + } + fp = next_fp; + } + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. 
+ const int kMaxUnwind = 1000; + int num_dropped_frames = 0; + for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + fp = NextStackFrame(fp, ucp, stack_low, + stack_high); + } + *min_dropped_frames = num_dropped_frames; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h b/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h new file mode 100644 index 00000000..9285df74 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h @@ -0,0 +1,151 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains internal parts of the Abseil symbolizer. +// Do not depend on the anything in this file, it may change at anytime. 
+ +#ifndef ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_ +#define ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_ + +#ifdef __cplusplus + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE +#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set +#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && !defined(__asmjs__) && !defined(__wasm__) +#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1 + +#include +#include // For ElfW() macro. +#include +#include + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Iterates over all sections, invoking callback on each with the section name + // and the section header. + // + // Returns true on success; otherwise returns false in case of errors. + // + // This is not async-signal-safe. + bool ForEachSection(int fd, const std::function& callback); + + // Gets the section header for the given name, if it exists. Returns true on + // success. Otherwise, returns false. + bool GetSectionHeaderByName(int fd, const char* name, size_t name_len, ElfW(Shdr) * out); + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE + +#ifdef ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE +#error ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE cannot be directly set +#elif defined(__APPLE__) +#define ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE 1 +#endif + +#ifdef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE +#error ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE cannot be directly set +#elif defined(__EMSCRIPTEN__) +#define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1 +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + struct SymbolDecoratorArgs + { + // The program counter we are getting symbolic name for. + const void* pc; + // 0 for main executable, load address for shared libraries. 
+ ptrdiff_t relocation; + // Read-only file descriptor for ELF image covering "pc", + // or -1 if no such ELF image exists in /proc/self/maps. + int fd; + // Output buffer, size. + // Note: the buffer may not be empty -- default symbolizer may have already + // produced some output, and earlier decorators may have adorned it in + // some way. You are free to replace or augment the contents (within the + // symbol_buf_size limit). + char* const symbol_buf; + size_t symbol_buf_size; + // Temporary scratch space, size. + // Use that space in preference to allocating your own stack buffer to + // conserve stack. + char* const tmp_buf; + size_t tmp_buf_size; + // User-provided argument + void* arg; + }; + using SymbolDecorator = void (*)(const SymbolDecoratorArgs*); + + // Installs a function-pointer as a decorator. Returns a value less than zero + // if the system cannot install the decorator. Otherwise, returns a unique + // identifier corresponding to the decorator. This identifier can be used to + // uninstall the decorator - See RemoveSymbolDecorator() below. + int InstallSymbolDecorator(SymbolDecorator decorator, void* arg); + + // Removes a previously installed function-pointer decorator. Parameter "ticket" + // is the return-value from calling InstallSymbolDecorator(). + bool RemoveSymbolDecorator(int ticket); + + // Remove all installed decorators. Returns true if successful, false if + // symbolization is currently in progress. + bool RemoveAllSymbolDecorators(); + + // Registers an address range to a file mapping. + // + // Preconditions: + // start <= end + // filename != nullptr + // + // Returns true if the file was successfully registered. + bool RegisterFileMappingHint(const void* start, const void* end, uint64_t offset, const char* filename); + + // Looks up the file mapping registered by RegisterFileMappingHint for an + // address range. 
If there is one, the file name is stored in *filename and + // *start and *end are modified to reflect the registered mapping. Returns + // whether any hint was found. + bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset, const char** filename); + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // __cplusplus + +#include + +#ifdef __cplusplus +extern "C" +#endif // __cplusplus + + bool + AbslInternalGetFileMappingHint(const void** start, const void** end, uint64_t* offset, const char** filename); + +#endif // ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h b/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h new file mode 100644 index 00000000..6707098d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h @@ -0,0 +1,175 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Allow dynamic symbol lookup in the kernel VDSO page. +// +// VDSO stands for "Virtual Dynamic Shared Object" -- a page of +// executable code, which looks like a shared library, but doesn't +// necessarily exist anywhere on disk, and which gets mmap()ed into +// every process by kernels which support VDSO, such as 2.6.x for 32-bit +// executables, and 2.6.24 and above for 64-bit executables. 
+// +// More details could be found here: +// http://www.trilithium.com/johan/2005/08/linux-gate/ +// +// VDSOSupport -- a class representing kernel VDSO (if present). +// +// Example usage: +// VDSOSupport vdso; +// VDSOSupport::SymbolInfo info; +// typedef (*FN)(unsigned *, void *, void *); +// FN fn = nullptr; +// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) { +// fn = reinterpret_cast(info.address); +// } + +#ifndef ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_ +#define ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_ + +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/internal/elf_mem_image.h" + +#ifdef ABSL_HAVE_ELF_MEM_IMAGE + +#ifdef ABSL_HAVE_VDSO_SUPPORT +#error ABSL_HAVE_VDSO_SUPPORT cannot be directly set +#else +#define ABSL_HAVE_VDSO_SUPPORT 1 +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // NOTE: this class may be used from within tcmalloc, and can not + // use any memory allocation routines. + class VDSOSupport + { + public: + VDSOSupport(); + + typedef ElfMemImage::SymbolInfo SymbolInfo; + typedef ElfMemImage::SymbolIterator SymbolIterator; + + // On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE + // depending on how the kernel is built. The kernel is normally built with + // STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a + // compile-time constant. +#ifdef __powerpc64__ + enum + { + kVDSOSymbolType = STT_NOTYPE + }; +#else + enum + { + kVDSOSymbolType = STT_FUNC + }; +#endif + + // Answers whether we have a vdso at all. + bool IsPresent() const + { + return image_.IsPresent(); + } + + // Allow to iterate over all VDSO symbols. + SymbolIterator begin() const + { + return image_.begin(); + } + SymbolIterator end() const + { + return image_.end(); + } + + // Look up versioned dynamic symbol in the kernel VDSO. + // Returns false if VDSO is not present, or doesn't contain given + // symbol/version/type combination. 
+ // If info_out != nullptr, additional details are filled in. + bool LookupSymbol(const char* name, const char* version, int symbol_type, SymbolInfo* info_out) const; + + // Find info about symbol (if any) which overlaps given address. + // Returns true if symbol was found; false if VDSO isn't present + // or doesn't have a symbol overlapping given address. + // If info_out != nullptr, additional details are filled in. + bool LookupSymbolByAddress(const void* address, SymbolInfo* info_out) const; + + // Used only for testing. Replace real VDSO base with a mock. + // Returns previous value of vdso_base_. After you are done testing, + // you are expected to call SetBase() with previous value, in order to + // reset state to the way it was. + const void* SetBase(const void* s); + + // Computes vdso_base_ and returns it. Should be called as early as + // possible; before any thread creation, chroot or setuid. + static const void* Init(); + + private: + // image_ represents VDSO ELF image in memory. + // image_.ehdr_ == nullptr implies there is no VDSO. + ElfMemImage image_; + + // Cached value of auxv AT_SYSINFO_EHDR, computed once. + // This is a tri-state: + // kInvalidBase => value hasn't been determined yet. + // 0 => there is no VDSO. + // else => vma of VDSO Elf{32,64}_Ehdr. + // + // When testing with mock VDSO, low bit is set. + // The low bit is always available because vdso_base_ is + // page-aligned. + static std::atomic vdso_base_; + + // NOLINT on 'long' because these routines mimic kernel api. + // The 'cache' parameter may be used by some versions of the kernel, + // and should be nullptr or point to a static buffer containing at + // least two 'long's. + static long InitAndGetCPU(unsigned* cpu, void* cache, // NOLINT 'long'. + void* unused); + static long GetCPUViaSyscall(unsigned* cpu, void* cache, // NOLINT 'long'. + void* unused); + typedef long (*GetCpuFn)(unsigned* cpu, void* cache, // NOLINT 'long'. 
+ void* unused); + + // This function pointer may point to InitAndGetCPU, + // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization. + ABSL_CONST_INIT static std::atomic getcpu_fn_; + + friend int GetCPU(void); // Needs access to getcpu_fn_. + + VDSOSupport(const VDSOSupport&) = delete; + VDSOSupport& operator=(const VDSOSupport&) = delete; + }; + + // Same as sched_getcpu() on later glibc versions. + // Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present, + // otherwise use syscall(SYS_getcpu,...). + // May return -1 with errno == ENOSYS if the kernel doesn't + // support SYS_getcpu. + int GetCPU(); + + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HAVE_ELF_MEM_IMAGE + +#endif // ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/leak_check.h b/CAPI/cpp/grpc/include/absl/debugging/leak_check.h new file mode 100644 index 00000000..fd8d5f06 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/leak_check.h @@ -0,0 +1,153 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: leak_check.h +// ----------------------------------------------------------------------------- +// +// This file contains functions that affect leak checking behavior within +// targets built with the LeakSanitizer (LSan), a memory leak detector that is +// integrated within the AddressSanitizer (ASan) as an additional component, or +// which can be used standalone. LSan and ASan are included (or can be provided) +// as additional components for most compilers such as Clang, gcc and MSVC. +// Note: this leak checking API is not yet supported in MSVC. +// Leak checking is enabled by default in all ASan builds. +// +// https://clang.llvm.org/docs/LeakSanitizer.html +// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// +// GCC and Clang both automatically enable LeakSanitizer when AddressSanitizer +// is enabled. To use the mode, simply pass `-fsanitize=address` to both the +// compiler and linker. An example Bazel command could be +// +// $ bazel test --copt=-fsanitize=address --linkopt=-fsanitize=address ... +// +// GCC and Clang auto support a standalone LeakSanitizer mode (a mode which does +// not also use AddressSanitizer). To use the mode, simply pass +// `-fsanitize=leak` to both the compiler and linker. Since GCC does not +// currently provide a way of detecting this mode at compile-time, GCC users +// must also pass -DLEAK_SANITIZER to the compiler. An example Bazel command +// could be +// +// $ bazel test --copt=-DLEAK_SANITIZER --copt=-fsanitize=leak +// --linkopt=-fsanitize=leak ... 
+// +// ----------------------------------------------------------------------------- +#ifndef ABSL_DEBUGGING_LEAK_CHECK_H_ +#define ABSL_DEBUGGING_LEAK_CHECK_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // HaveLeakSanitizer() + // + // Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is + // currently built into this target. + bool HaveLeakSanitizer(); + + // LeakCheckerIsActive() + // + // Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is + // currently built into this target and is turned on. + bool LeakCheckerIsActive(); + + // DoIgnoreLeak() + // + // Implements `IgnoreLeak()` below. This function should usually + // not be called directly; calling `IgnoreLeak()` is preferred. + void DoIgnoreLeak(const void* ptr); + + // IgnoreLeak() + // + // Instruct the leak sanitizer to ignore leak warnings on the object referenced + // by the passed pointer, as well as all heap objects transitively referenced + // by it. The passed object pointer can point to either the beginning of the + // object or anywhere within it. + // + // Example: + // + // static T* obj = IgnoreLeak(new T(...)); + // + // If the passed `ptr` does not point to an actively allocated object at the + // time `IgnoreLeak()` is called, the call is a no-op; if it is actively + // allocated, leak sanitizer will assume this object is referenced even if + // there is no actual reference in user memory. + // + template + T* IgnoreLeak(T* ptr) + { + DoIgnoreLeak(ptr); + return ptr; + } + + // FindAndReportLeaks() + // + // If any leaks are detected, prints a leak report and returns true. This + // function may be called repeatedly, and does not affect end-of-process leak + // checking. + // + // Example: + // if (FindAndReportLeaks()) { + // ... diagnostic already printed. Exit with failure code. 
+ // exit(1) + // } + bool FindAndReportLeaks(); + + // LeakCheckDisabler + // + // This helper class indicates that any heap allocations done in the code block + // covered by the scoped object, which should be allocated on the stack, will + // not be reported as leaks. Leak check disabling will occur within the code + // block and any nested function calls within the code block. + // + // Example: + // + // void Foo() { + // LeakCheckDisabler disabler; + // ... code that allocates objects whose leaks should be ignored ... + // } + // + // REQUIRES: Destructor runs in same thread as constructor + class LeakCheckDisabler + { + public: + LeakCheckDisabler(); + LeakCheckDisabler(const LeakCheckDisabler&) = delete; + LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete; + ~LeakCheckDisabler(); + }; + + // RegisterLivePointers() + // + // Registers `ptr[0,size-1]` as pointers to memory that is still actively being + // referenced and for which leak checking should be ignored. This function is + // useful if you store pointers in mapped memory, for memory ranges that we know + // are correct but for which normal analysis would flag as leaked code. + void RegisterLivePointers(const void* ptr, size_t size); + + // UnRegisterLivePointers() + // + // Deregisters the pointers previously marked as active in + // `RegisterLivePointers()`, enabling leak checking of those pointers. + void UnRegisterLivePointers(const void* ptr, size_t size); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_LEAK_CHECK_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h b/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h new file mode 100644 index 00000000..28c7598a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h @@ -0,0 +1,223 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: stacktrace.h +// ----------------------------------------------------------------------------- +// +// This file contains routines to extract the current stack trace and associated +// stack frames. These functions are thread-safe and async-signal-safe. +// +// Note that stack trace functionality is platform dependent and requires +// additional support from the compiler/build system in most cases. (That is, +// this functionality generally only works on platforms/builds that have been +// specifically configured to support it.) +// +// Note: stack traces in Abseil that do not utilize a symbolizer will result in +// frames consisting of function addresses rather than human-readable function +// names. (See symbolize.h for information on symbolizing these values.) + +#ifndef ABSL_DEBUGGING_STACKTRACE_H_ +#define ABSL_DEBUGGING_STACKTRACE_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // GetStackFrames() + // + // Records program counter values for up to `max_depth` frames, skipping the + // most recent `skip_count` stack frames, stores their corresponding values + // and sizes in `results` and `sizes` buffers, and returns the number of frames + // stored. (Note that the frame generated for the `absl::GetStackFrames()` + // routine itself is also skipped.) 
+ // + // Example: + // + // main() { foo(); } + // foo() { bar(); } + // bar() { + // void* result[10]; + // int sizes[10]; + // int depth = absl::GetStackFrames(result, sizes, 10, 1); + // } + // + // The current stack frame would consist of three function calls: `bar()`, + // `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets + // `skip_count` to `1`, it will skip the frame for `bar()`, the most recently + // invoked function call. It will therefore return 2 and fill `result` with + // program counters within the following functions: + // + // result[0] foo() + // result[1] main() + // + // (Note: in practice, a few more entries after `main()` may be added to account + // for startup processes.) + // + // Corresponding stack frame sizes will also be recorded: + // + // sizes[0] 16 + // sizes[1] 16 + // + // (Stack frame sizes of `16` above are just for illustration purposes.) + // + // Stack frame sizes of 0 or less indicate that those frame sizes couldn't + // be identified. + // + // This routine may return fewer stack frame entries than are + // available. Also note that `result` and `sizes` must both be non-null. + extern int GetStackFrames(void** result, int* sizes, int max_depth, int skip_count); + + // GetStackFramesWithContext() + // + // Records program counter values obtained from a signal handler. Records + // program counter values for up to `max_depth` frames, skipping the most recent + // `skip_count` stack frames, stores their corresponding values and sizes in + // `results` and `sizes` buffers, and returns the number of frames stored. (Note + // that the frame generated for the `absl::GetStackFramesWithContext()` routine + // itself is also skipped.) + // + // The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value + // passed to a signal handler registered via the `sa_sigaction` field of a + // `sigaction` struct. (See + // http://man7.org/linux/man-pages/man2/sigaction.2.html.) 
The `uc` value may + // help a stack unwinder to provide a better stack trace under certain + // conditions. `uc` may safely be null. + // + // The `min_dropped_frames` output parameter, if non-null, points to the + // location to note any dropped stack frames, if any, due to buffer limitations + // or other reasons. (This value will be set to `0` if no frames were dropped.) + // The number of total stack frames is guaranteed to be >= skip_count + + // max_depth + *min_dropped_frames. + extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames); + + // GetStackTrace() + // + // Records program counter values for up to `max_depth` frames, skipping the + // most recent `skip_count` stack frames, stores their corresponding values + // in `results`, and returns the number of frames + // stored. Note that this function is similar to `absl::GetStackFrames()` + // except that it returns the stack trace only, and not stack frame sizes. + // + // Example: + // + // main() { foo(); } + // foo() { bar(); } + // bar() { + // void* result[10]; + // int depth = absl::GetStackTrace(result, 10, 1); + // } + // + // This produces: + // + // result[0] foo + // result[1] main + // .... ... + // + // `result` must not be null. + extern int GetStackTrace(void** result, int max_depth, int skip_count); + + // GetStackTraceWithContext() + // + // Records program counter values obtained from a signal handler. Records + // program counter values for up to `max_depth` frames, skipping the most recent + // `skip_count` stack frames, stores their corresponding values in `results`, + // and returns the number of frames stored. (Note that the frame generated for + // the `absl::GetStackFramesWithContext()` routine itself is also skipped.) 
+ // + // The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value + // passed to a signal handler registered via the `sa_sigaction` field of a + // `sigaction` struct. (See + // http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may + // help a stack unwinder to provide a better stack trace under certain + // conditions. `uc` may safely be null. + // + // The `min_dropped_frames` output parameter, if non-null, points to the + // location to note any dropped stack frames, if any, due to buffer limitations + // or other reasons. (This value will be set to `0` if no frames were dropped.) + // The number of total stack frames is guaranteed to be >= skip_count + + // max_depth + *min_dropped_frames. + extern int GetStackTraceWithContext(void** result, int max_depth, int skip_count, const void* uc, int* min_dropped_frames); + + // SetStackUnwinder() + // + // Provides a custom function for unwinding stack frames that will be used in + // place of the default stack unwinder when invoking the static + // GetStack{Frames,Trace}{,WithContext}() functions above. + // + // The arguments passed to the unwinder function will match the + // arguments passed to `absl::GetStackFramesWithContext()` except that sizes + // will be non-null iff the caller is interested in frame sizes. + // + // If unwinder is set to null, we revert to the default stack-tracing behavior. + // + // ***************************************************************************** + // WARNING + // ***************************************************************************** + // + // absl::SetStackUnwinder is not suitable for general purpose use. It is + // provided for custom runtimes. + // Some things to watch out for when calling `absl::SetStackUnwinder()`: + // + // (a) The unwinder may be called from within signal handlers and + // therefore must be async-signal-safe. 
+ // + // (b) Even after a custom stack unwinder has been unregistered, other + // threads may still be in the process of using that unwinder. + // Therefore do not clean up any state that may be needed by an old + // unwinder. + // ***************************************************************************** + extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames)); + + // DefaultStackUnwinder() + // + // Records program counter values of up to `max_depth` frames, skipping the most + // recent `skip_count` stack frames, and stores their corresponding values in + // `pcs`. (Note that the frame generated for this call itself is also skipped.) + // This function acts as a generic stack-unwinder; prefer usage of the more + // specific `GetStack{Trace,Frames}{,WithContext}()` functions above. + // + // If you have set your own stack unwinder (with the `SetStackUnwinder()` + // function above, you can still get the default stack unwinder by calling + // `DefaultStackUnwinder()`, which will ignore any previously set stack unwinder + // and use the default one instead. + // + // Because this function is generic, only `pcs` is guaranteed to be non-null + // upon return. It is legal for `sizes`, `uc`, and `min_dropped_frames` to all + // be null when called. + // + // The semantics are the same as the corresponding `GetStack*()` function in the + // case where `absl::SetStackUnwinder()` was never called. 
Equivalents are: + // + // null sizes | non-nullptr sizes + // |==========================================================| + // null uc | GetStackTrace() | GetStackFrames() | + // non-null uc | GetStackTraceWithContext() | GetStackFramesWithContext() | + // |==========================================================| + extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames); + + namespace debugging_internal + { + // Returns true for platforms which are expected to have functioning stack trace + // implementations. Intended to be used for tests which want to exclude + // verification of logic known to be broken because stack traces are not + // working. + extern bool StackTraceWorksForTest(); + } // namespace debugging_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_STACKTRACE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize.h b/CAPI/cpp/grpc/include/absl/debugging/symbolize.h new file mode 100644 index 00000000..db68479f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize.h @@ -0,0 +1,100 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: symbolize.h +// ----------------------------------------------------------------------------- +// +// This file configures the Abseil symbolizer for use in converting instruction +// pointer addresses (program counters) into human-readable names (function +// calls, etc.) within Abseil code. +// +// The symbolizer may be invoked from several sources: +// +// * Implicitly, through the installation of an Abseil failure signal handler. +// (See failure_signal_handler.h for more information.) +// * By calling `Symbolize()` directly on a program counter you obtain through +// `absl::GetStackTrace()` or `absl::GetStackFrames()`. (See stacktrace.h +// for more information. +// * By calling `Symbolize()` directly on a program counter you obtain through +// other means (which would be platform-dependent). +// +// In all of the above cases, the symbolizer must first be initialized before +// any program counter values can be symbolized. If you are installing a failure +// signal handler, initialize the symbolizer before you do so. +// +// Example: +// +// int main(int argc, char** argv) { +// // Initialize the Symbolizer before installing the failure signal handler +// absl::InitializeSymbolizer(argv[0]); +// +// // Now you may install the failure signal handler +// absl::FailureSignalHandlerOptions options; +// absl::InstallFailureSignalHandler(options); +// +// // Start running your main program +// ... +// return 0; +// } +// +#ifndef ABSL_DEBUGGING_SYMBOLIZE_H_ +#define ABSL_DEBUGGING_SYMBOLIZE_H_ + +#include "absl/debugging/internal/symbolize.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // InitializeSymbolizer() + // + // Initializes the program counter symbolizer, given the path of the program + // (typically obtained through `main()`s `argv[0]`). 
The Abseil symbolizer + // allows you to read program counters (instruction pointer values) using their + // human-readable names within output such as stack traces. + // + // Example: + // + // int main(int argc, char *argv[]) { + // absl::InitializeSymbolizer(argv[0]); + // // Now you can use the symbolizer + // } + void InitializeSymbolizer(const char* argv0); + // + // Symbolize() + // + // Symbolizes a program counter (instruction pointer value) `pc` and, on + // success, writes the name to `out`. The symbol name is demangled, if possible. + // Note that the symbolized name may be truncated and will be NUL-terminated. + // Demangling is supported for symbols generated by GCC 3.x or newer). Returns + // `false` on failure. + // + // Example: + // + // // Print a program counter and its symbol name. + // static void DumpPCAndSymbol(void *pc) { + // char tmp[1024]; + // const char *symbol = "(unknown)"; + // if (absl::Symbolize(pc, tmp, sizeof(tmp))) { + // symbol = tmp; + // } + // absl::PrintF("%p %s\n", pc, symbol); + // } + bool Symbolize(const void* pc, char* out, int out_size); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_SYMBOLIZE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_darwin.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_darwin.inc new file mode 100644 index 00000000..cf63d191 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_darwin.inc @@ -0,0 +1,102 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/debugging/internal/demangle.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +void InitializeSymbolizer(const char*) {} + +namespace debugging_internal { +namespace { + +static std::string GetSymbolString(absl::string_view backtrace_line) { + // Example Backtrace lines: + // 0 libimaging_shared.dylib 0x018c152a + // _ZNSt11_Deque_baseIN3nik7mediadb4PageESaIS2_EE17_M_initialize_mapEm + 3478 + // + // or + // 0 libimaging_shared.dylib 0x0000000001895c39 + // _ZN3nik4util19register_shared_ptrINS_3gpu7TextureEEEvPKvS5_ + 39 + // + // or + // 0 mysterious_app 0x0124000120120009 main + 17 + auto address_pos = backtrace_line.find(" 0x"); + if (address_pos == absl::string_view::npos) return std::string(); + absl::string_view symbol_view = backtrace_line.substr(address_pos + 1); + + auto space_pos = symbol_view.find(" "); + if (space_pos == absl::string_view::npos) return std::string(); + symbol_view = symbol_view.substr(space_pos + 1); // to mangled symbol + + auto plus_pos = symbol_view.find(" + "); + if (plus_pos == absl::string_view::npos) return std::string(); + symbol_view = symbol_view.substr(0, plus_pos); // strip remainng + + return std::string(symbol_view); +} + +} // namespace +} // namespace debugging_internal + +bool Symbolize(const void* pc, char* out, int out_size) { + if (out_size <= 0 || pc == nullptr) { + out = nullptr; + return false; + } + + // This allocates a char* array. 
+ char** frame_strings = backtrace_symbols(const_cast(&pc), 1); + + if (frame_strings == nullptr) return false; + + std::string symbol = debugging_internal::GetSymbolString(frame_strings[0]); + free(frame_strings); + + char tmp_buf[1024]; + if (debugging_internal::Demangle(symbol.c_str(), tmp_buf, sizeof(tmp_buf))) { + size_t len = strlen(tmp_buf); + if (len + 1 <= static_cast(out_size)) { // +1 for '\0' + assert(len < sizeof(tmp_buf)); + memmove(out, tmp_buf, len + 1); + } + } else { + strncpy(out, symbol.c_str(), static_cast(out_size)); + } + + if (out[out_size - 1] != '\0') { + // strncpy() does not '\0' terminate when it truncates. + static constexpr char kEllipsis[] = "..."; + size_t ellipsis_size = + std::min(sizeof(kEllipsis) - 1, static_cast(out_size) - 1); + memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size); + out[out_size - 1] = '\0'; + } + + return true; +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_elf.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_elf.inc new file mode 100644 index 00000000..30638cb2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_elf.inc @@ -0,0 +1,1661 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This library provides Symbolize() function that symbolizes program +// counters to their corresponding symbol names on linux platforms. 
+// This library has a minimal implementation of an ELF symbol table +// reader (i.e. it doesn't depend on libelf, etc.). +// +// The algorithm used in Symbolize() is as follows. +// +// 1. Go through a list of maps in /proc/self/maps and find the map +// containing the program counter. +// +// 2. Open the mapped file and find a regular symbol table inside. +// Iterate over symbols in the symbol table and look for the symbol +// containing the program counter. If such a symbol is found, +// obtain the symbol name, and demangle the symbol if possible. +// If the symbol isn't found in the regular symbol table (binary is +// stripped), try the same thing with a dynamic symbol table. +// +// Note that Symbolize() is originally implemented to be used in +// signal handlers, hence it doesn't use malloc() and other unsafe +// operations. It should be both thread-safe and async-signal-safe. +// +// Implementation note: +// +// We don't use heaps but only use stacks. We want to reduce the +// stack consumption so that the symbolizer can run on small stacks. +// +// Here are some numbers collected with GCC 4.1.0 on x86: +// - sizeof(Elf32_Sym) = 16 +// - sizeof(Elf32_Shdr) = 40 +// - sizeof(Elf64_Sym) = 24 +// - sizeof(Elf64_Shdr) = 64 +// +// This implementation is intended to be async-signal-safe but uses some +// functions which are not guaranteed to be so, such as memchr() and +// memmove(). We assume they are async-signal-safe. + +#include +#include +#include +#include // For ElfW() macro. 
+#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/casts.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/low_level_alloc.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/port.h" +#include "absl/debugging/internal/demangle.h" +#include "absl/debugging/internal/vdso_support.h" +#include "absl/strings/string_view.h" + +#if defined(__FreeBSD__) && !defined(ElfW) +#define ElfW(x) __ElfN(x) +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Value of argv[0]. Used by MaybeInitializeObjFile(). +static char *argv0_value = nullptr; + +void InitializeSymbolizer(const char *argv0) { +#ifdef ABSL_HAVE_VDSO_SUPPORT + // We need to make sure VDSOSupport::Init() is called before any setuid or + // chroot calls, so InitializeSymbolizer() should be called very early in the + // life of a program. + absl::debugging_internal::VDSOSupport::Init(); +#endif + if (argv0_value != nullptr) { + free(argv0_value); + argv0_value = nullptr; + } + if (argv0 != nullptr && argv0[0] != '\0') { + argv0_value = strdup(argv0); + } +} + +namespace debugging_internal { +namespace { + +// Re-runs fn until it doesn't cause EINTR. +#define NO_INTR(fn) \ + do { \ + } while ((fn) < 0 && errno == EINTR) + +// On Linux, ELF_ST_* are defined in . To make this portable +// we define our own ELF_ST_BIND and ELF_ST_TYPE if not available. +#ifndef ELF_ST_BIND +#define ELF_ST_BIND(info) (((unsigned char)(info)) >> 4) +#endif + +#ifndef ELF_ST_TYPE +#define ELF_ST_TYPE(info) (((unsigned char)(info)) & 0xF) +#endif + +// Some platforms use a special .opd section to store function pointers. +const char kOpdSectionName[] = ".opd"; + +#if (defined(__powerpc__) && !(_CALL_ELF > 1)) || defined(__ia64) +// Use opd section for function descriptors on these platforms, the function +// address is the first word of the descriptor. 
+enum { kPlatformUsesOPDSections = 1 }; +#else // not PPC or IA64 +enum { kPlatformUsesOPDSections = 0 }; +#endif + +// This works for PowerPC & IA64 only. A function descriptor consist of two +// pointers and the first one is the function's entry. +const size_t kFunctionDescriptorSize = sizeof(void *) * 2; + +const int kMaxDecorators = 10; // Seems like a reasonable upper limit. + +struct InstalledSymbolDecorator { + SymbolDecorator fn; + void *arg; + int ticket; +}; + +int g_num_decorators; +InstalledSymbolDecorator g_decorators[kMaxDecorators]; + +struct FileMappingHint { + const void *start; + const void *end; + uint64_t offset; + const char *filename; +}; + +// Protects g_decorators. +// We are using SpinLock and not a Mutex here, because we may be called +// from inside Mutex::Lock itself, and it prohibits recursive calls. +// This happens in e.g. base/stacktrace_syscall_unittest. +// Moreover, we are using only TryLock(), if the decorator list +// is being modified (is busy), we skip all decorators, and possibly +// loose some info. Sorry, that's the best we could do. +ABSL_CONST_INIT absl::base_internal::SpinLock g_decorators_mu( + absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY); + +const int kMaxFileMappingHints = 8; +int g_num_file_mapping_hints; +FileMappingHint g_file_mapping_hints[kMaxFileMappingHints]; +// Protects g_file_mapping_hints. +ABSL_CONST_INIT absl::base_internal::SpinLock g_file_mapping_mu( + absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY); + +// Async-signal-safe function to zero a buffer. +// memset() is not guaranteed to be async-signal-safe. 
+static void SafeMemZero(void* p, size_t size) { + unsigned char *c = static_cast(p); + while (size--) { + *c++ = 0; + } +} + +struct ObjFile { + ObjFile() + : filename(nullptr), + start_addr(nullptr), + end_addr(nullptr), + offset(0), + fd(-1), + elf_type(-1) { + SafeMemZero(&elf_header, sizeof(elf_header)); + SafeMemZero(&phdr[0], sizeof(phdr)); + } + + char *filename; + const void *start_addr; + const void *end_addr; + uint64_t offset; + + // The following fields are initialized on the first access to the + // object file. + int fd; + int elf_type; + ElfW(Ehdr) elf_header; + + // PT_LOAD program header describing executable code. + // Normally we expect just one, but SWIFT binaries have two. + // CUDA binaries have 3 (see cr/473913254 description). + std::array phdr; +}; + +// Build 4-way associative cache for symbols. Within each cache line, symbols +// are replaced in LRU order. +enum { + ASSOCIATIVITY = 4, +}; +struct SymbolCacheLine { + const void *pc[ASSOCIATIVITY]; + char *name[ASSOCIATIVITY]; + + // age[i] is incremented when a line is accessed. it's reset to zero if the + // i'th entry is read. + uint32_t age[ASSOCIATIVITY]; +}; + +// --------------------------------------------------------------- +// An async-signal-safe arena for LowLevelAlloc +static std::atomic g_sig_safe_arena; + +static base_internal::LowLevelAlloc::Arena *SigSafeArena() { + return g_sig_safe_arena.load(std::memory_order_acquire); +} + +static void InitSigSafeArena() { + if (SigSafeArena() == nullptr) { + base_internal::LowLevelAlloc::Arena *new_arena = + base_internal::LowLevelAlloc::NewArena( + base_internal::LowLevelAlloc::kAsyncSignalSafe); + base_internal::LowLevelAlloc::Arena *old_value = nullptr; + if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena, + std::memory_order_release, + std::memory_order_relaxed)) { + // We lost a race to allocate an arena; deallocate. 
+ base_internal::LowLevelAlloc::DeleteArena(new_arena); + } + } +} + +// --------------------------------------------------------------- +// An AddrMap is a vector of ObjFile, using SigSafeArena() for allocation. + +class AddrMap { + public: + AddrMap() : size_(0), allocated_(0), obj_(nullptr) {} + ~AddrMap() { base_internal::LowLevelAlloc::Free(obj_); } + size_t Size() const { return size_; } + ObjFile *At(size_t i) { return &obj_[i]; } + ObjFile *Add(); + void Clear(); + + private: + size_t size_; // count of valid elements (<= allocated_) + size_t allocated_; // count of allocated elements + ObjFile *obj_; // array of allocated_ elements + AddrMap(const AddrMap &) = delete; + AddrMap &operator=(const AddrMap &) = delete; +}; + +void AddrMap::Clear() { + for (size_t i = 0; i != size_; i++) { + At(i)->~ObjFile(); + } + size_ = 0; +} + +ObjFile *AddrMap::Add() { + if (size_ == allocated_) { + size_t new_allocated = allocated_ * 2 + 50; + ObjFile *new_obj_ = + static_cast(base_internal::LowLevelAlloc::AllocWithArena( + new_allocated * sizeof(*new_obj_), SigSafeArena())); + if (obj_) { + memcpy(new_obj_, obj_, allocated_ * sizeof(*new_obj_)); + base_internal::LowLevelAlloc::Free(obj_); + } + obj_ = new_obj_; + allocated_ = new_allocated; + } + return new (&obj_[size_++]) ObjFile; +} + +// --------------------------------------------------------------- + +enum FindSymbolResult { SYMBOL_NOT_FOUND = 1, SYMBOL_TRUNCATED, SYMBOL_FOUND }; + +class Symbolizer { + public: + Symbolizer(); + ~Symbolizer(); + const char *GetSymbol(const void *const pc); + + private: + char *CopyString(const char *s) { + size_t len = strlen(s); + char *dst = static_cast( + base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); + ABSL_RAW_CHECK(dst != nullptr, "out of memory"); + memcpy(dst, s, len + 1); + return dst; + } + ObjFile *FindObjFile(const void *const start, + size_t size) ABSL_ATTRIBUTE_NOINLINE; + static bool RegisterObjFile(const char *filename, + const void *const 
start_addr, + const void *const end_addr, uint64_t offset, + void *arg); + SymbolCacheLine *GetCacheLine(const void *const pc); + const char *FindSymbolInCache(const void *const pc); + const char *InsertSymbolInCache(const void *const pc, const char *name); + void AgeSymbols(SymbolCacheLine *line); + void ClearAddrMap(); + FindSymbolResult GetSymbolFromObjectFile(const ObjFile &obj, + const void *const pc, + const ptrdiff_t relocation, + char *out, size_t out_size, + char *tmp_buf, size_t tmp_buf_size); + const char *GetUncachedSymbol(const void *pc); + + enum { + SYMBOL_BUF_SIZE = 3072, + TMP_BUF_SIZE = 1024, + SYMBOL_CACHE_LINES = 128, + }; + + AddrMap addr_map_; + + bool ok_; + bool addr_map_read_; + + char symbol_buf_[SYMBOL_BUF_SIZE]; + + // tmp_buf_ will be used to store arrays of ElfW(Shdr) and ElfW(Sym) + // so we ensure that tmp_buf_ is properly aligned to store either. + alignas(16) char tmp_buf_[TMP_BUF_SIZE]; + static_assert(alignof(ElfW(Shdr)) <= 16, + "alignment of tmp buf too small for Shdr"); + static_assert(alignof(ElfW(Sym)) <= 16, + "alignment of tmp buf too small for Sym"); + + SymbolCacheLine symbol_cache_[SYMBOL_CACHE_LINES]; +}; + +static std::atomic g_cached_symbolizer; + +} // namespace + +static size_t SymbolizerSize() { +#if defined(__wasm__) || defined(__asmjs__) + auto pagesize = static_cast(getpagesize()); +#else + auto pagesize = static_cast(sysconf(_SC_PAGESIZE)); +#endif + return ((sizeof(Symbolizer) - 1) / pagesize + 1) * pagesize; +} + +// Return (and set null) g_cached_symbolized_state if it is not null. +// Otherwise return a new symbolizer. 
+static Symbolizer *AllocateSymbolizer() { + InitSigSafeArena(); + Symbolizer *symbolizer = + g_cached_symbolizer.exchange(nullptr, std::memory_order_acquire); + if (symbolizer != nullptr) { + return symbolizer; + } + return new (base_internal::LowLevelAlloc::AllocWithArena( + SymbolizerSize(), SigSafeArena())) Symbolizer(); +} + +// Set g_cached_symbolize_state to s if it is null, otherwise +// delete s. +static void FreeSymbolizer(Symbolizer *s) { + Symbolizer *old_cached_symbolizer = nullptr; + if (!g_cached_symbolizer.compare_exchange_strong(old_cached_symbolizer, s, + std::memory_order_release, + std::memory_order_relaxed)) { + s->~Symbolizer(); + base_internal::LowLevelAlloc::Free(s); + } +} + +Symbolizer::Symbolizer() : ok_(true), addr_map_read_(false) { + for (SymbolCacheLine &symbol_cache_line : symbol_cache_) { + for (size_t j = 0; j < ABSL_ARRAYSIZE(symbol_cache_line.name); ++j) { + symbol_cache_line.pc[j] = nullptr; + symbol_cache_line.name[j] = nullptr; + symbol_cache_line.age[j] = 0; + } + } +} + +Symbolizer::~Symbolizer() { + for (SymbolCacheLine &symbol_cache_line : symbol_cache_) { + for (char *s : symbol_cache_line.name) { + base_internal::LowLevelAlloc::Free(s); + } + } + ClearAddrMap(); +} + +// We don't use assert() since it's not guaranteed to be +// async-signal-safe. Instead we define a minimal assertion +// macro. So far, we don't need pretty printing for __FILE__, etc. +#define SAFE_ASSERT(expr) ((expr) ? static_cast(0) : abort()) + +// Read up to "count" bytes from file descriptor "fd" into the buffer +// starting at "buf" while handling short reads and EINTR. On +// success, return the number of bytes read. Otherwise, return -1. 
+static ssize_t ReadPersistent(int fd, void *buf, size_t count) { + SAFE_ASSERT(fd >= 0); + SAFE_ASSERT(count <= SSIZE_MAX); + char *buf0 = reinterpret_cast(buf); + size_t num_bytes = 0; + while (num_bytes < count) { + ssize_t len; + NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes)); + if (len < 0) { // There was an error other than EINTR. + ABSL_RAW_LOG(WARNING, "read failed: errno=%d", errno); + return -1; + } + if (len == 0) { // Reached EOF. + break; + } + num_bytes += static_cast(len); + } + SAFE_ASSERT(num_bytes <= count); + return static_cast(num_bytes); +} + +// Read up to "count" bytes from "offset" in the file pointed by file +// descriptor "fd" into the buffer starting at "buf". On success, +// return the number of bytes read. Otherwise, return -1. +static ssize_t ReadFromOffset(const int fd, void *buf, const size_t count, + const off_t offset) { + off_t off = lseek(fd, offset, SEEK_SET); + if (off == (off_t)-1) { + ABSL_RAW_LOG(WARNING, "lseek(%d, %jd, SEEK_SET) failed: errno=%d", fd, + static_cast(offset), errno); + return -1; + } + return ReadPersistent(fd, buf, count); +} + +// Try reading exactly "count" bytes from "offset" bytes in a file +// pointed by "fd" into the buffer starting at "buf" while handling +// short reads and EINTR. On success, return true. Otherwise, return +// false. +static bool ReadFromOffsetExact(const int fd, void *buf, const size_t count, + const off_t offset) { + ssize_t len = ReadFromOffset(fd, buf, count, offset); + return len >= 0 && static_cast(len) == count; +} + +// Returns elf_header.e_type if the file pointed by fd is an ELF binary. 
+static int FileGetElfType(const int fd) { + ElfW(Ehdr) elf_header; + if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) { + return -1; + } + if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) { + return -1; + } + return elf_header.e_type; +} + +// Read the section headers in the given ELF binary, and if a section +// of the specified type is found, set the output to this section header +// and return true. Otherwise, return false. +// To keep stack consumption low, we would like this function to not get +// inlined. +static ABSL_ATTRIBUTE_NOINLINE bool GetSectionHeaderByType( + const int fd, ElfW(Half) sh_num, const off_t sh_offset, ElfW(Word) type, + ElfW(Shdr) * out, char *tmp_buf, size_t tmp_buf_size) { + ElfW(Shdr) *buf = reinterpret_cast(tmp_buf); + const size_t buf_entries = tmp_buf_size / sizeof(buf[0]); + const size_t buf_bytes = buf_entries * sizeof(buf[0]); + + for (size_t i = 0; static_cast(i) < sh_num;) { + const size_t num_bytes_left = + (static_cast(sh_num) - i) * sizeof(buf[0]); + const size_t num_bytes_to_read = + (buf_bytes > num_bytes_left) ? 
num_bytes_left : buf_bytes; + const off_t offset = sh_offset + static_cast(i * sizeof(buf[0])); + const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read, offset); + if (len < 0) { + ABSL_RAW_LOG( + WARNING, + "Reading %zu bytes from offset %ju returned %zd which is negative.", + num_bytes_to_read, static_cast(offset), len); + return false; + } + if (static_cast(len) % sizeof(buf[0]) != 0) { + ABSL_RAW_LOG( + WARNING, + "Reading %zu bytes from offset %jd returned %zd which is not a " + "multiple of %zu.", + num_bytes_to_read, static_cast(offset), len, + sizeof(buf[0])); + return false; + } + const size_t num_headers_in_buf = static_cast(len) / sizeof(buf[0]); + SAFE_ASSERT(num_headers_in_buf <= buf_entries); + for (size_t j = 0; j < num_headers_in_buf; ++j) { + if (buf[j].sh_type == type) { + *out = buf[j]; + return true; + } + } + i += num_headers_in_buf; + } + return false; +} + +// There is no particular reason to limit section name to 63 characters, +// but there has (as yet) been no need for anything longer either. +const int kMaxSectionNameLen = 64; + +bool ForEachSection(int fd, + const std::function &callback) { + ElfW(Ehdr) elf_header; + if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) { + return false; + } + + // Technically it can be larger, but in practice this never happens. 
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) { + return false; + } + + ElfW(Shdr) shstrtab; + off_t shstrtab_offset = static_cast(elf_header.e_shoff) + + elf_header.e_shentsize * elf_header.e_shstrndx; + if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) { + return false; + } + + for (int i = 0; i < elf_header.e_shnum; ++i) { + ElfW(Shdr) out; + off_t section_header_offset = + static_cast(elf_header.e_shoff) + elf_header.e_shentsize * i; + if (!ReadFromOffsetExact(fd, &out, sizeof(out), section_header_offset)) { + return false; + } + off_t name_offset = static_cast(shstrtab.sh_offset) + out.sh_name; + char header_name[kMaxSectionNameLen]; + ssize_t n_read = + ReadFromOffset(fd, &header_name, kMaxSectionNameLen, name_offset); + if (n_read < 0) { + return false; + } else if (n_read > kMaxSectionNameLen) { + // Long read? + return false; + } + + absl::string_view name(header_name, + strnlen(header_name, static_cast(n_read))); + if (!callback(name, out)) { + break; + } + } + return true; +} + +// name_len should include terminating '\0'. +bool GetSectionHeaderByName(int fd, const char *name, size_t name_len, + ElfW(Shdr) * out) { + char header_name[kMaxSectionNameLen]; + if (sizeof(header_name) < name_len) { + ABSL_RAW_LOG(WARNING, + "Section name '%s' is too long (%zu); " + "section will not be found (even if present).", + name, name_len); + // No point in even trying. + return false; + } + + ElfW(Ehdr) elf_header; + if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) { + return false; + } + + // Technically it can be larger, but in practice this never happens. 
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) { + return false; + } + + ElfW(Shdr) shstrtab; + off_t shstrtab_offset = static_cast(elf_header.e_shoff) + + elf_header.e_shentsize * elf_header.e_shstrndx; + if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) { + return false; + } + + for (int i = 0; i < elf_header.e_shnum; ++i) { + off_t section_header_offset = + static_cast(elf_header.e_shoff) + elf_header.e_shentsize * i; + if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) { + return false; + } + off_t name_offset = static_cast(shstrtab.sh_offset) + out->sh_name; + ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset); + if (n_read < 0) { + return false; + } else if (static_cast(n_read) != name_len) { + // Short read -- name could be at end of file. + continue; + } + if (memcmp(header_name, name, name_len) == 0) { + return true; + } + } + return false; +} + +// Compare symbols at in the same address. +// Return true if we should pick symbol1. +static bool ShouldPickFirstSymbol(const ElfW(Sym) & symbol1, + const ElfW(Sym) & symbol2) { + // If one of the symbols is weak and the other is not, pick the one + // this is not a weak symbol. + char bind1 = ELF_ST_BIND(symbol1.st_info); + char bind2 = ELF_ST_BIND(symbol1.st_info); + if (bind1 == STB_WEAK && bind2 != STB_WEAK) return false; + if (bind2 == STB_WEAK && bind1 != STB_WEAK) return true; + + // If one of the symbols has zero size and the other is not, pick the + // one that has non-zero size. + if (symbol1.st_size != 0 && symbol2.st_size == 0) { + return true; + } + if (symbol1.st_size == 0 && symbol2.st_size != 0) { + return false; + } + + // If one of the symbols has no type and the other is not, pick the + // one that has a type. 
+ char type1 = ELF_ST_TYPE(symbol1.st_info); + char type2 = ELF_ST_TYPE(symbol1.st_info); + if (type1 != STT_NOTYPE && type2 == STT_NOTYPE) { + return true; + } + if (type1 == STT_NOTYPE && type2 != STT_NOTYPE) { + return false; + } + + // Pick the first one, if we still cannot decide. + return true; +} + +// Return true if an address is inside a section. +static bool InSection(const void *address, ptrdiff_t relocation, + const ElfW(Shdr) * section) { + const char *start = reinterpret_cast( + section->sh_addr + static_cast(relocation)); + size_t size = static_cast(section->sh_size); + return start <= address && address < (start + size); +} + +static const char *ComputeOffset(const char *base, ptrdiff_t offset) { + // Note: cast to intptr_t to avoid undefined behavior when base evaluates to + // zero and offset is non-zero. + return reinterpret_cast(reinterpret_cast(base) + + offset); +} + +// Read a symbol table and look for the symbol containing the +// pc. Iterate over symbols in a symbol table and look for the symbol +// containing "pc". If the symbol is found, and its name fits in +// out_size, the name is written into out and SYMBOL_FOUND is returned. +// If the name does not fit, truncated name is written into out, +// and SYMBOL_TRUNCATED is returned. Out is NUL-terminated. +// If the symbol is not found, SYMBOL_NOT_FOUND is returned; +// To keep stack consumption low, we would like this function to not get +// inlined. +static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol( + const void *const pc, const int fd, char *out, size_t out_size, + ptrdiff_t relocation, const ElfW(Shdr) * strtab, const ElfW(Shdr) * symtab, + const ElfW(Shdr) * opd, char *tmp_buf, size_t tmp_buf_size) { + if (symtab == nullptr) { + return SYMBOL_NOT_FOUND; + } + + // Read multiple symbols at once to save read() calls. 
+ ElfW(Sym) *buf = reinterpret_cast(tmp_buf); + const size_t buf_entries = tmp_buf_size / sizeof(buf[0]); + + const size_t num_symbols = symtab->sh_size / symtab->sh_entsize; + + // On platforms using an .opd section (PowerPC & IA64), a function symbol + // has the address of a function descriptor, which contains the real + // starting address. However, we do not always want to use the real + // starting address because we sometimes want to symbolize a function + // pointer into the .opd section, e.g. FindSymbol(&foo,...). + const bool pc_in_opd = kPlatformUsesOPDSections && opd != nullptr && + InSection(pc, relocation, opd); + const bool deref_function_descriptor_pointer = + kPlatformUsesOPDSections && opd != nullptr && !pc_in_opd; + + ElfW(Sym) best_match; + SafeMemZero(&best_match, sizeof(best_match)); + bool found_match = false; + for (size_t i = 0; i < num_symbols;) { + off_t offset = + static_cast(symtab->sh_offset + i * symtab->sh_entsize); + const size_t num_remaining_symbols = num_symbols - i; + const size_t entries_in_chunk = + std::min(num_remaining_symbols, buf_entries); + const size_t bytes_in_chunk = entries_in_chunk * sizeof(buf[0]); + const ssize_t len = ReadFromOffset(fd, buf, bytes_in_chunk, offset); + SAFE_ASSERT(len >= 0); + SAFE_ASSERT(static_cast(len) % sizeof(buf[0]) == 0); + const size_t num_symbols_in_buf = static_cast(len) / sizeof(buf[0]); + SAFE_ASSERT(num_symbols_in_buf <= entries_in_chunk); + for (size_t j = 0; j < num_symbols_in_buf; ++j) { + const ElfW(Sym) &symbol = buf[j]; + + // For a DSO, a symbol address is relocated by the loading address. + // We keep the original address for opd redirection below. 
+ const char *const original_start_address = + reinterpret_cast(symbol.st_value); + const char *start_address = + ComputeOffset(original_start_address, relocation); + +#ifdef __arm__ + // ARM functions are always aligned to multiples of two bytes; the + // lowest-order bit in start_address is ignored by the CPU and indicates + // whether the function contains ARM (0) or Thumb (1) code. We don't care + // about what encoding is being used; we just want the real start address + // of the function. + start_address = reinterpret_cast( + reinterpret_cast(start_address) & ~1u); +#endif + + if (deref_function_descriptor_pointer && + InSection(original_start_address, /*relocation=*/0, opd)) { + // The opd section is mapped into memory. Just dereference + // start_address to get the first double word, which points to the + // function entry. + start_address = *reinterpret_cast(start_address); + } + + // If pc is inside the .opd section, it points to a function descriptor. + const size_t size = pc_in_opd ? kFunctionDescriptorSize : symbol.st_size; + const void *const end_address = + ComputeOffset(start_address, static_cast(size)); + if (symbol.st_value != 0 && // Skip null value symbols. + symbol.st_shndx != 0 && // Skip undefined symbols. +#ifdef STT_TLS + ELF_ST_TYPE(symbol.st_info) != STT_TLS && // Skip thread-local data. +#endif // STT_TLS + ((start_address <= pc && pc < end_address) || + (start_address == pc && pc == end_address))) { + if (!found_match || ShouldPickFirstSymbol(symbol, best_match)) { + found_match = true; + best_match = symbol; + } + } + } + i += num_symbols_in_buf; + } + + if (found_match) { + const off_t off = + static_cast(strtab->sh_offset) + best_match.st_name; + const ssize_t n_read = ReadFromOffset(fd, out, out_size, off); + if (n_read <= 0) { + // This should never happen. 
+ ABSL_RAW_LOG(WARNING, + "Unable to read from fd %d at offset %lld: n_read = %zd", fd, + static_cast(off), n_read); + return SYMBOL_NOT_FOUND; + } + ABSL_RAW_CHECK(static_cast(n_read) <= out_size, + "ReadFromOffset read too much data."); + + // strtab->sh_offset points into .strtab-like section that contains + // NUL-terminated strings: '\0foo\0barbaz\0...". + // + // sh_offset+st_name points to the start of symbol name, but we don't know + // how long the symbol is, so we try to read as much as we have space for, + // and usually over-read (i.e. there is a NUL somewhere before n_read). + if (memchr(out, '\0', static_cast(n_read)) == nullptr) { + // Either out_size was too small (n_read == out_size and no NUL), or + // we tried to read past the EOF (n_read < out_size) and .strtab is + // corrupt (missing terminating NUL; should never happen for valid ELF). + out[n_read - 1] = '\0'; + return SYMBOL_TRUNCATED; + } + return SYMBOL_FOUND; + } + + return SYMBOL_NOT_FOUND; +} + +// Get the symbol name of "pc" from the file pointed by "fd". Process +// both regular and dynamic symbol tables if necessary. +// See FindSymbol() comment for description of return value. +FindSymbolResult Symbolizer::GetSymbolFromObjectFile( + const ObjFile &obj, const void *const pc, const ptrdiff_t relocation, + char *out, size_t out_size, char *tmp_buf, size_t tmp_buf_size) { + ElfW(Shdr) symtab; + ElfW(Shdr) strtab; + ElfW(Shdr) opd; + ElfW(Shdr) *opd_ptr = nullptr; + + // On platforms using an .opd sections for function descriptor, read + // the section header. The .opd section is in data segment and should be + // loaded but we check that it is mapped just to be extra careful. 
+ if (kPlatformUsesOPDSections) { + if (GetSectionHeaderByName(obj.fd, kOpdSectionName, + sizeof(kOpdSectionName) - 1, &opd) && + FindObjFile(reinterpret_cast(opd.sh_addr) + relocation, + opd.sh_size) != nullptr) { + opd_ptr = &opd; + } else { + return SYMBOL_NOT_FOUND; + } + } + + // Consult a regular symbol table, then fall back to the dynamic symbol table. + for (const auto symbol_table_type : {SHT_SYMTAB, SHT_DYNSYM}) { + if (!GetSectionHeaderByType(obj.fd, obj.elf_header.e_shnum, + static_cast(obj.elf_header.e_shoff), + static_cast(symbol_table_type), + &symtab, tmp_buf, tmp_buf_size)) { + continue; + } + if (!ReadFromOffsetExact( + obj.fd, &strtab, sizeof(strtab), + static_cast(obj.elf_header.e_shoff + + symtab.sh_link * sizeof(symtab)))) { + continue; + } + const FindSymbolResult rc = + FindSymbol(pc, obj.fd, out, out_size, relocation, &strtab, &symtab, + opd_ptr, tmp_buf, tmp_buf_size); + if (rc != SYMBOL_NOT_FOUND) { + return rc; + } + } + + return SYMBOL_NOT_FOUND; +} + +namespace { +// Thin wrapper around a file descriptor so that the file descriptor +// gets closed for sure. +class FileDescriptor { + public: + explicit FileDescriptor(int fd) : fd_(fd) {} + FileDescriptor(const FileDescriptor &) = delete; + FileDescriptor &operator=(const FileDescriptor &) = delete; + + ~FileDescriptor() { + if (fd_ >= 0) { + close(fd_); + } + } + + int get() const { return fd_; } + + private: + const int fd_; +}; + +// Helper class for reading lines from file. +// +// Note: we don't use ProcMapsIterator since the object is big (it has +// a 5k array member) and uses async-unsafe functions such as sscanf() +// and snprintf(). +class LineReader { + public: + explicit LineReader(int fd, char *buf, size_t buf_len) + : fd_(fd), + buf_len_(buf_len), + buf_(buf), + bol_(buf), + eol_(buf), + eod_(buf) {} + + LineReader(const LineReader &) = delete; + LineReader &operator=(const LineReader &) = delete; + + // Read '\n'-terminated line from file. 
On success, modify "bol" + // and "eol", then return true. Otherwise, return false. + // + // Note: if the last line doesn't end with '\n', the line will be + // dropped. It's an intentional behavior to make the code simple. + bool ReadLine(const char **bol, const char **eol) { + if (BufferIsEmpty()) { // First time. + const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_); + if (num_bytes <= 0) { // EOF or error. + return false; + } + eod_ = buf_ + num_bytes; + bol_ = buf_; + } else { + bol_ = eol_ + 1; // Advance to the next line in the buffer. + SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_". + if (!HasCompleteLine()) { + const auto incomplete_line_length = static_cast(eod_ - bol_); + // Move the trailing incomplete line to the beginning. + memmove(buf_, bol_, incomplete_line_length); + // Read text from file and append it. + char *const append_pos = buf_ + incomplete_line_length; + const size_t capacity_left = buf_len_ - incomplete_line_length; + const ssize_t num_bytes = + ReadPersistent(fd_, append_pos, capacity_left); + if (num_bytes <= 0) { // EOF or error. + return false; + } + eod_ = append_pos + num_bytes; + bol_ = buf_; + } + } + eol_ = FindLineFeed(); + if (eol_ == nullptr) { // '\n' not found. Malformed line. + return false; + } + *eol_ = '\0'; // Replace '\n' with '\0'. + + *bol = bol_; + *eol = eol_; + return true; + } + + private: + char *FindLineFeed() const { + return reinterpret_cast( + memchr(bol_, '\n', static_cast(eod_ - bol_))); + } + + bool BufferIsEmpty() const { return buf_ == eod_; } + + bool HasCompleteLine() const { + return !BufferIsEmpty() && FindLineFeed() != nullptr; + } + + const int fd_; + const size_t buf_len_; + char *const buf_; + char *bol_; + char *eol_; + const char *eod_; // End of data in "buf_". +}; +} // namespace + +// Place the hex number read from "start" into "*hex". The pointer to +// the first non-hex character or "end" is returned. 
+static const char *GetHex(const char *start, const char *end, + uint64_t *const value) { + uint64_t hex = 0; + const char *p; + for (p = start; p < end; ++p) { + int ch = *p; + if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') || + (ch >= 'a' && ch <= 'f')) { + hex = (hex << 4) | + static_cast(ch < 'A' ? ch - '0' : (ch & 0xF) + 9); + } else { // Encountered the first non-hex character. + break; + } + } + SAFE_ASSERT(p <= end); + *value = hex; + return p; +} + +static const char *GetHex(const char *start, const char *end, + const void **const addr) { + uint64_t hex = 0; + const char *p = GetHex(start, end, &hex); + *addr = reinterpret_cast(hex); + return p; +} + +// Normally we are only interested in "r?x" maps. +// On the PowerPC, function pointers point to descriptors in the .opd +// section. The descriptors themselves are not executable code, so +// we need to relax the check below to "r??". +static bool ShouldUseMapping(const char *const flags) { + return flags[0] == 'r' && (kPlatformUsesOPDSections || flags[2] == 'x'); +} + +// Read /proc/self/maps and run "callback" for each mmapped file found. If +// "callback" returns false, stop scanning and return true. Else continue +// scanning /proc/self/maps. Return true if no parse error is found. +static ABSL_ATTRIBUTE_NOINLINE bool ReadAddrMap( + bool (*callback)(const char *filename, const void *const start_addr, + const void *const end_addr, uint64_t offset, void *arg), + void *arg, void *tmp_buf, size_t tmp_buf_size) { + // Use /proc/self/task//maps instead of /proc/self/maps. The latter + // requires kernel to stop all threads, and is significantly slower when there + // are 1000s of threads. 
+ char maps_path[80]; + snprintf(maps_path, sizeof(maps_path), "/proc/self/task/%d/maps", getpid()); + + int maps_fd; + NO_INTR(maps_fd = open(maps_path, O_RDONLY)); + FileDescriptor wrapped_maps_fd(maps_fd); + if (wrapped_maps_fd.get() < 0) { + ABSL_RAW_LOG(WARNING, "%s: errno=%d", maps_path, errno); + return false; + } + + // Iterate over maps and look for the map containing the pc. Then + // look into the symbol tables inside. + LineReader reader(wrapped_maps_fd.get(), static_cast(tmp_buf), + tmp_buf_size); + while (true) { + const char *cursor; + const char *eol; + if (!reader.ReadLine(&cursor, &eol)) { // EOF or malformed line. + break; + } + + const char *line = cursor; + const void *start_address; + // Start parsing line in /proc/self/maps. Here is an example: + // + // 08048000-0804c000 r-xp 00000000 08:01 2142121 /bin/cat + // + // We want start address (08048000), end address (0804c000), flags + // (r-xp) and file name (/bin/cat). + + // Read start address. + cursor = GetHex(cursor, eol, &start_address); + if (cursor == eol || *cursor != '-') { + ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line); + return false; + } + ++cursor; // Skip '-'. + + // Read end address. + const void *end_address; + cursor = GetHex(cursor, eol, &end_address); + if (cursor == eol || *cursor != ' ') { + ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line); + return false; + } + ++cursor; // Skip ' '. + + // Read flags. Skip flags until we encounter a space or eol. + const char *const flags_start = cursor; + while (cursor < eol && *cursor != ' ') { + ++cursor; + } + // We expect at least four letters for flags (ex. "r-xp"). + if (cursor == eol || cursor < flags_start + 4) { + ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps: %s", line); + return false; + } + + // Check flags. + if (!ShouldUseMapping(flags_start)) { + continue; // We skip this map. + } + ++cursor; // Skip ' '. + + // Read file offset. 
+ uint64_t offset; + cursor = GetHex(cursor, eol, &offset); + ++cursor; // Skip ' '. + + // Skip to file name. "cursor" now points to dev. We need to skip at least + // two spaces for dev and inode. + int num_spaces = 0; + while (cursor < eol) { + if (*cursor == ' ') { + ++num_spaces; + } else if (num_spaces >= 2) { + // The first non-space character after skipping two spaces + // is the beginning of the file name. + break; + } + ++cursor; + } + + // Check whether this entry corresponds to our hint table for the true + // filename. + bool hinted = + GetFileMappingHint(&start_address, &end_address, &offset, &cursor); + if (!hinted && (cursor == eol || cursor[0] == '[')) { + // not an object file, typically [vdso] or [vsyscall] + continue; + } + if (!callback(cursor, start_address, end_address, offset, arg)) break; + } + return true; +} + +// Find the objfile mapped in address region containing [addr, addr + len). +ObjFile *Symbolizer::FindObjFile(const void *const addr, size_t len) { + for (int i = 0; i < 2; ++i) { + if (!ok_) return nullptr; + + // Read /proc/self/maps if necessary + if (!addr_map_read_) { + addr_map_read_ = true; + if (!ReadAddrMap(RegisterObjFile, this, tmp_buf_, TMP_BUF_SIZE)) { + ok_ = false; + return nullptr; + } + } + + size_t lo = 0; + size_t hi = addr_map_.Size(); + while (lo < hi) { + size_t mid = (lo + hi) / 2; + if (addr < addr_map_.At(mid)->end_addr) { + hi = mid; + } else { + lo = mid + 1; + } + } + if (lo != addr_map_.Size()) { + ObjFile *obj = addr_map_.At(lo); + SAFE_ASSERT(obj->end_addr > addr); + if (addr >= obj->start_addr && + reinterpret_cast(addr) + len <= obj->end_addr) + return obj; + } + + // The address mapping may have changed since it was last read. Retry. 
+ ClearAddrMap(); + } + return nullptr; +} + +void Symbolizer::ClearAddrMap() { + for (size_t i = 0; i != addr_map_.Size(); i++) { + ObjFile *o = addr_map_.At(i); + base_internal::LowLevelAlloc::Free(o->filename); + if (o->fd >= 0) { + close(o->fd); + } + } + addr_map_.Clear(); + addr_map_read_ = false; +} + +// Callback for ReadAddrMap to register objfiles in an in-memory table. +bool Symbolizer::RegisterObjFile(const char *filename, + const void *const start_addr, + const void *const end_addr, uint64_t offset, + void *arg) { + Symbolizer *impl = static_cast(arg); + + // Files are supposed to be added in the increasing address order. Make + // sure that's the case. + size_t addr_map_size = impl->addr_map_.Size(); + if (addr_map_size != 0) { + ObjFile *old = impl->addr_map_.At(addr_map_size - 1); + if (old->end_addr > end_addr) { + ABSL_RAW_LOG(ERROR, + "Unsorted addr map entry: 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR + ": %s", + reinterpret_cast(end_addr), filename, + reinterpret_cast(old->end_addr), old->filename); + return true; + } else if (old->end_addr == end_addr) { + // The same entry appears twice. This sometimes happens for [vdso]. + if (old->start_addr != start_addr || + strcmp(old->filename, filename) != 0) { + ABSL_RAW_LOG(ERROR, + "Duplicate addr 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR ": %s", + reinterpret_cast(end_addr), filename, + reinterpret_cast(old->end_addr), old->filename); + } + return true; + } else if (old->end_addr == start_addr && + reinterpret_cast(old->start_addr) - old->offset == + reinterpret_cast(start_addr) - offset && + strcmp(old->filename, filename) == 0) { + // Two contiguous map entries that span a contiguous region of the file, + // perhaps because some part of the file was mlock()ed. Combine them. 
+ old->end_addr = end_addr; + return true; + } + } + ObjFile *obj = impl->addr_map_.Add(); + obj->filename = impl->CopyString(filename); + obj->start_addr = start_addr; + obj->end_addr = end_addr; + obj->offset = offset; + obj->elf_type = -1; // filled on demand + obj->fd = -1; // opened on demand + return true; +} + +// This function wraps the Demangle function to provide an interface +// where the input symbol is demangled in-place. +// To keep stack consumption low, we would like this function to not +// get inlined. +static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, size_t out_size, + char *tmp_buf, + size_t tmp_buf_size) { + if (Demangle(out, tmp_buf, tmp_buf_size)) { + // Demangling succeeded. Copy to out if the space allows. + size_t len = strlen(tmp_buf); + if (len + 1 <= out_size) { // +1 for '\0'. + SAFE_ASSERT(len < tmp_buf_size); + memmove(out, tmp_buf, len + 1); + } + } +} + +SymbolCacheLine *Symbolizer::GetCacheLine(const void *const pc) { + uintptr_t pc0 = reinterpret_cast(pc); + pc0 >>= 3; // drop the low 3 bits + + // Shuffle bits. 
+ pc0 ^= (pc0 >> 6) ^ (pc0 >> 12) ^ (pc0 >> 18); + return &symbol_cache_[pc0 % SYMBOL_CACHE_LINES]; +} + +void Symbolizer::AgeSymbols(SymbolCacheLine *line) { + for (uint32_t &age : line->age) { + ++age; + } +} + +const char *Symbolizer::FindSymbolInCache(const void *const pc) { + if (pc == nullptr) return nullptr; + + SymbolCacheLine *line = GetCacheLine(pc); + for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) { + if (line->pc[i] == pc) { + AgeSymbols(line); + line->age[i] = 0; + return line->name[i]; + } + } + return nullptr; +} + +const char *Symbolizer::InsertSymbolInCache(const void *const pc, + const char *name) { + SAFE_ASSERT(pc != nullptr); + + SymbolCacheLine *line = GetCacheLine(pc); + uint32_t max_age = 0; + size_t oldest_index = 0; + bool found_oldest_index = false; + for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) { + if (line->pc[i] == nullptr) { + AgeSymbols(line); + line->pc[i] = pc; + line->name[i] = CopyString(name); + line->age[i] = 0; + return line->name[i]; + } + if (line->age[i] >= max_age) { + max_age = line->age[i]; + oldest_index = i; + found_oldest_index = true; + } + } + + AgeSymbols(line); + ABSL_RAW_CHECK(found_oldest_index, "Corrupt cache"); + base_internal::LowLevelAlloc::Free(line->name[oldest_index]); + line->pc[oldest_index] = pc; + line->name[oldest_index] = CopyString(name); + line->age[oldest_index] = 0; + return line->name[oldest_index]; +} + +static void MaybeOpenFdFromSelfExe(ObjFile *obj) { + if (memcmp(obj->start_addr, ELFMAG, SELFMAG) != 0) { + return; + } + int fd = open("/proc/self/exe", O_RDONLY); + if (fd == -1) { + return; + } + // Verify that contents of /proc/self/exe matches in-memory image of + // the binary. This can fail if the "deleted" binary is in fact not + // the main executable, or for binaries that have the first PT_LOAD + // segment smaller than 4K. We do it in four steps so that the + // buffer is smaller and we don't consume too much stack space. 
+ const char *mem = reinterpret_cast(obj->start_addr); + for (int i = 0; i < 4; ++i) { + char buf[1024]; + ssize_t n = read(fd, buf, sizeof(buf)); + if (n != sizeof(buf) || memcmp(buf, mem, sizeof(buf)) != 0) { + close(fd); + return; + } + mem += sizeof(buf); + } + obj->fd = fd; +} + +static bool MaybeInitializeObjFile(ObjFile *obj) { + if (obj->fd < 0) { + obj->fd = open(obj->filename, O_RDONLY); + + if (obj->fd < 0) { + // Getting /proc/self/exe here means that we were hinted. + if (strcmp(obj->filename, "/proc/self/exe") == 0) { + // /proc/self/exe may be inaccessible (due to setuid, etc.), so try + // accessing the binary via argv0. + if (argv0_value != nullptr) { + obj->fd = open(argv0_value, O_RDONLY); + } + } else { + MaybeOpenFdFromSelfExe(obj); + } + } + + if (obj->fd < 0) { + ABSL_RAW_LOG(WARNING, "%s: open failed: errno=%d", obj->filename, errno); + return false; + } + obj->elf_type = FileGetElfType(obj->fd); + if (obj->elf_type < 0) { + ABSL_RAW_LOG(WARNING, "%s: wrong elf type: %d", obj->filename, + obj->elf_type); + return false; + } + + if (!ReadFromOffsetExact(obj->fd, &obj->elf_header, sizeof(obj->elf_header), + 0)) { + ABSL_RAW_LOG(WARNING, "%s: failed to read elf header", obj->filename); + return false; + } + const int phnum = obj->elf_header.e_phnum; + const int phentsize = obj->elf_header.e_phentsize; + auto phoff = static_cast(obj->elf_header.e_phoff); + size_t num_interesting_load_segments = 0; + for (int j = 0; j < phnum; j++) { + ElfW(Phdr) phdr; + if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) { + ABSL_RAW_LOG(WARNING, "%s: failed to read program header %d", + obj->filename, j); + return false; + } + phoff += phentsize; + +#if defined(__powerpc__) && !(_CALL_ELF > 1) + // On the PowerPC ELF v1 ABI, function pointers actually point to function + // descriptors. These descriptors are stored in an .opd section, which is + // mapped read-only. We thus need to look at all readable segments, not + // just the executable ones. 
+ constexpr int interesting = PF_R; +#else + constexpr int interesting = PF_X | PF_R; +#endif + + if (phdr.p_type != PT_LOAD + || (phdr.p_flags & interesting) != interesting) { + // Not a LOAD segment, not executable code, and not a function + // descriptor. + continue; + } + if (num_interesting_load_segments < obj->phdr.size()) { + memcpy(&obj->phdr[num_interesting_load_segments++], &phdr, sizeof(phdr)); + } else { + ABSL_RAW_LOG( + WARNING, "%s: too many interesting LOAD segments: %zu >= %zu", + obj->filename, num_interesting_load_segments, obj->phdr.size()); + break; + } + } + if (num_interesting_load_segments == 0) { + // This object has no interesting LOAD segments. That's unexpected. + ABSL_RAW_LOG(WARNING, "%s: no interesting LOAD segments", obj->filename); + return false; + } + } + return true; +} + +// The implementation of our symbolization routine. If it +// successfully finds the symbol containing "pc" and obtains the +// symbol name, returns pointer to that symbol. Otherwise, returns nullptr. +// If any symbol decorators have been installed via InstallSymbolDecorator(), +// they are called here as well. +// To keep stack consumption low, we would like this function to not +// get inlined. +const char *Symbolizer::GetUncachedSymbol(const void *pc) { + ObjFile *const obj = FindObjFile(pc, 1); + ptrdiff_t relocation = 0; + int fd = -1; + if (obj != nullptr) { + if (MaybeInitializeObjFile(obj)) { + const size_t start_addr = reinterpret_cast(obj->start_addr); + if (obj->elf_type == ET_DYN && start_addr >= obj->offset) { + // This object was relocated. + // + // For obj->offset > 0, adjust the relocation since a mapping at offset + // X in the file will have a start address of [true relocation]+X. + relocation = static_cast(start_addr - obj->offset); + + // Note: some binaries have multiple LOAD segments that can contain + // function pointers. We must find the right one. 
+ ElfW(Phdr) *phdr = nullptr; + for (size_t j = 0; j < obj->phdr.size(); j++) { + ElfW(Phdr) &p = obj->phdr[j]; + if (p.p_type != PT_LOAD) { + // We only expect PT_LOADs. This must be PT_NULL that we didn't + // write over (i.e. we exhausted all interesting PT_LOADs). + ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type"); + break; + } + if (pc < reinterpret_cast(start_addr + p.p_vaddr + p.p_memsz)) { + phdr = &p; + break; + } + } + if (phdr == nullptr) { + // That's unexpected. Hope for the best. + ABSL_RAW_LOG( + WARNING, + "%s: unable to find LOAD segment for pc: %p, start_addr: %zx", + obj->filename, pc, start_addr); + } else { + // Adjust relocation in case phdr.p_vaddr != 0. + // This happens for binaries linked with `lld --rosegment`, and for + // binaries linked with BFD `ld -z separate-code`. + relocation -= phdr->p_vaddr - phdr->p_offset; + } + } + + fd = obj->fd; + if (GetSymbolFromObjectFile(*obj, pc, relocation, symbol_buf_, + sizeof(symbol_buf_), tmp_buf_, + sizeof(tmp_buf_)) == SYMBOL_FOUND) { + // Only try to demangle the symbol name if it fit into symbol_buf_. + DemangleInplace(symbol_buf_, sizeof(symbol_buf_), tmp_buf_, + sizeof(tmp_buf_)); + } + } + } else { +#if ABSL_HAVE_VDSO_SUPPORT + VDSOSupport vdso; + if (vdso.IsPresent()) { + VDSOSupport::SymbolInfo symbol_info; + if (vdso.LookupSymbolByAddress(pc, &symbol_info)) { + // All VDSO symbols are known to be short. 
+ size_t len = strlen(symbol_info.name); + ABSL_RAW_CHECK(len + 1 < sizeof(symbol_buf_), + "VDSO symbol unexpectedly long"); + memcpy(symbol_buf_, symbol_info.name, len + 1); + } + } +#endif + } + + if (g_decorators_mu.TryLock()) { + if (g_num_decorators > 0) { + SymbolDecoratorArgs decorator_args = { + pc, relocation, fd, symbol_buf_, sizeof(symbol_buf_), + tmp_buf_, sizeof(tmp_buf_), nullptr}; + for (int i = 0; i < g_num_decorators; ++i) { + decorator_args.arg = g_decorators[i].arg; + g_decorators[i].fn(&decorator_args); + } + } + g_decorators_mu.Unlock(); + } + if (symbol_buf_[0] == '\0') { + return nullptr; + } + symbol_buf_[sizeof(symbol_buf_) - 1] = '\0'; // Paranoia. + return InsertSymbolInCache(pc, symbol_buf_); +} + +const char *Symbolizer::GetSymbol(const void *pc) { + const char *entry = FindSymbolInCache(pc); + if (entry != nullptr) { + return entry; + } + symbol_buf_[0] = '\0'; + +#ifdef __hppa__ + { + // In some contexts (e.g., return addresses), PA-RISC uses the lowest two + // bits of the address to indicate the privilege level. Clear those bits + // before trying to symbolize. + const auto pc_bits = reinterpret_cast(pc); + const auto address = pc_bits & ~0x3; + entry = GetUncachedSymbol(reinterpret_cast(address)); + if (entry != nullptr) { + return entry; + } + + // In some contexts, PA-RISC also uses bit 1 of the address to indicate that + // this is a cross-DSO function pointer. Such function pointers actually + // point to a procedure label, a struct whose first 32-bit (pointer) element + // actually points to the function text. With no symbol found for this + // address so far, try interpreting it as a cross-DSO function pointer and + // see how that goes. + if (pc_bits & 0x2) { + return GetUncachedSymbol(*reinterpret_cast(address)); + } + + return nullptr; + } +#else + return GetUncachedSymbol(pc); +#endif +} + +bool RemoveAllSymbolDecorators(void) { + if (!g_decorators_mu.TryLock()) { + // Someone else is using decorators. Get out. 
+ return false; + } + g_num_decorators = 0; + g_decorators_mu.Unlock(); + return true; +} + +bool RemoveSymbolDecorator(int ticket) { + if (!g_decorators_mu.TryLock()) { + // Someone else is using decorators. Get out. + return false; + } + for (int i = 0; i < g_num_decorators; ++i) { + if (g_decorators[i].ticket == ticket) { + while (i < g_num_decorators - 1) { + g_decorators[i] = g_decorators[i + 1]; + ++i; + } + g_num_decorators = i; + break; + } + } + g_decorators_mu.Unlock(); + return true; // Decorator is known to be removed. +} + +int InstallSymbolDecorator(SymbolDecorator decorator, void *arg) { + static int ticket = 0; + + if (!g_decorators_mu.TryLock()) { + // Someone else is using decorators. Get out. + return -2; + } + int ret = ticket; + if (g_num_decorators >= kMaxDecorators) { + ret = -1; + } else { + g_decorators[g_num_decorators] = {decorator, arg, ticket++}; + ++g_num_decorators; + } + g_decorators_mu.Unlock(); + return ret; +} + +bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset, + const char *filename) { + SAFE_ASSERT(start <= end); + SAFE_ASSERT(filename != nullptr); + + InitSigSafeArena(); + + if (!g_file_mapping_mu.TryLock()) { + return false; + } + + bool ret = true; + if (g_num_file_mapping_hints >= kMaxFileMappingHints) { + ret = false; + } else { + // TODO(ckennelly): Move this into a string copy routine. 
+ size_t len = strlen(filename); + char *dst = static_cast( + base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); + ABSL_RAW_CHECK(dst != nullptr, "out of memory"); + memcpy(dst, filename, len + 1); + + auto &hint = g_file_mapping_hints[g_num_file_mapping_hints++]; + hint.start = start; + hint.end = end; + hint.offset = offset; + hint.filename = dst; + } + + g_file_mapping_mu.Unlock(); + return ret; +} + +bool GetFileMappingHint(const void **start, const void **end, uint64_t *offset, + const char **filename) { + if (!g_file_mapping_mu.TryLock()) { + return false; + } + bool found = false; + for (int i = 0; i < g_num_file_mapping_hints; i++) { + if (g_file_mapping_hints[i].start <= *start && + *end <= g_file_mapping_hints[i].end) { + // We assume that the start_address for the mapping is the base + // address of the ELF section, but when [start_address,end_address) is + // not strictly equal to [hint.start, hint.end), that assumption is + // invalid. + // + // This uses the hint's start address (even though hint.start is not + // necessarily equal to start_address) to ensure the correct + // relocation is computed later. + *start = g_file_mapping_hints[i].start; + *end = g_file_mapping_hints[i].end; + *offset = g_file_mapping_hints[i].offset; + *filename = g_file_mapping_hints[i].filename; + found = true; + break; + } + } + g_file_mapping_mu.Unlock(); + return found; +} + +} // namespace debugging_internal + +bool Symbolize(const void *pc, char *out, int out_size) { + // Symbolization is very slow under tsan. + ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN(); + SAFE_ASSERT(out_size >= 0); + debugging_internal::Symbolizer *s = debugging_internal::AllocateSymbolizer(); + const char *name = s->GetSymbol(pc); + bool ok = false; + if (name != nullptr && out_size > 0) { + strncpy(out, name, static_cast(out_size)); + ok = true; + if (out[static_cast(out_size) - 1] != '\0') { + // strncpy() does not '\0' terminate when it truncates. 
Do so, with + // trailing ellipsis. + static constexpr char kEllipsis[] = "..."; + size_t ellipsis_size = + std::min(strlen(kEllipsis), static_cast(out_size) - 1); + memcpy(out + static_cast(out_size) - ellipsis_size - 1, kEllipsis, + ellipsis_size); + out[static_cast(out_size) - 1] = '\0'; + } + } + debugging_internal::FreeSymbolizer(s); + ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END(); + return ok; +} + +ABSL_NAMESPACE_END +} // namespace absl + +extern "C" bool AbslInternalGetFileMappingHint(const void **start, + const void **end, uint64_t *offset, + const char **filename) { + return absl::debugging_internal::GetFileMappingHint(start, end, offset, + filename); +} diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_emscripten.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_emscripten.inc new file mode 100644 index 00000000..a0f344dd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_emscripten.inc @@ -0,0 +1,75 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include + +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/debugging/internal/demangle.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" + +extern "C" { +const char* emscripten_pc_get_function(const void* pc); +} + +// clang-format off +EM_JS(bool, HaveOffsetConverter, (), + { return typeof wasmOffsetConverter !== 'undefined'; }); +// clang-format on + +namespace absl { +ABSL_NAMESPACE_BEGIN + +void InitializeSymbolizer(const char*) { + if (!HaveOffsetConverter()) { + ABSL_RAW_LOG(INFO, + "Symbolization unavailable. Rebuild with -sWASM=1 " + "and -sUSE_OFFSET_CONVERTER=1."); + } +} + +bool Symbolize(const void* pc, char* out, int out_size) { + // Check if we have the offset converter necessary for pc_get_function. + // Without it, the program will abort(). + if (!HaveOffsetConverter()) { + return false; + } + if (pc == nullptr || out_size <= 0) { + return false; + } + const char* func_name = emscripten_pc_get_function(pc); + if (func_name == nullptr) { + return false; + } + + strncpy(out, func_name, out_size); + + if (out[out_size - 1] != '\0') { + // strncpy() does not '\0' terminate when it truncates. + static constexpr char kEllipsis[] = "..."; + int ellipsis_size = std::min(sizeof(kEllipsis) - 1, out_size - 1); + memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size); + out[out_size - 1] = '\0'; + } + + return true; +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_unimplemented.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_unimplemented.inc new file mode 100644 index 00000000..db24456b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_unimplemented.inc @@ -0,0 +1,40 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "absl/base/internal/raw_logging.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace debugging_internal { + +int InstallSymbolDecorator(SymbolDecorator, void*) { return -1; } +bool RemoveSymbolDecorator(int) { return false; } +bool RemoveAllSymbolDecorators(void) { return false; } +bool RegisterFileMappingHint(const void *, const void *, uint64_t, const char *) { + return false; +} +bool GetFileMappingHint(const void **, const void **, uint64_t *, const char **) { + return false; +} + +} // namespace debugging_internal + +void InitializeSymbolizer(const char*) {} +bool Symbolize(const void *, char *, int) { return false; } + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_win32.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_win32.inc new file mode 100644 index 00000000..53a099a1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_win32.inc @@ -0,0 +1,82 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// See "Retrieving Symbol Information by Address": +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680578(v=vs.85).aspx + +#include + +// MSVC header dbghelp.h has a warning for an ignored typedef. +#pragma warning(push) +#pragma warning(disable:4091) +#include +#pragma warning(pop) + +#pragma comment(lib, "dbghelp.lib") + +#include +#include + +#include "absl/base/internal/raw_logging.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +static HANDLE process = NULL; + +void InitializeSymbolizer(const char*) { + if (process != nullptr) { + return; + } + process = GetCurrentProcess(); + + // Symbols are not loaded until a reference is made requiring the + // symbols be loaded. This is the fastest, most efficient way to use + // the symbol handler. + SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME); + if (!SymInitialize(process, nullptr, true)) { + // GetLastError() returns a Win32 DWORD, but we assign to + // unsigned long long to simplify the ABSL_RAW_LOG case below. The uniform + // initialization guarantees this is not a narrowing conversion. + const unsigned long long error{GetLastError()}; // NOLINT(runtime/int) + ABSL_RAW_LOG(FATAL, "SymInitialize() failed: %llu", error); + } +} + +bool Symbolize(const void* pc, char* out, int out_size) { + if (out_size <= 0) { + return false; + } + alignas(SYMBOL_INFO) char buf[sizeof(SYMBOL_INFO) + MAX_SYM_NAME]; + SYMBOL_INFO* symbol = reinterpret_cast(buf); + symbol->SizeOfStruct = sizeof(SYMBOL_INFO); + symbol->MaxNameLen = MAX_SYM_NAME; + if (!SymFromAddr(process, reinterpret_cast(pc), nullptr, symbol)) { + return false; + } + const size_t out_size_t = static_cast(out_size); + strncpy(out, symbol->Name, out_size_t); + if (out[out_size_t - 1] != '\0') { + // strncpy() does not '\0' terminate when it truncates. 
+ static constexpr char kEllipsis[] = "..."; + size_t ellipsis_size = + std::min(sizeof(kEllipsis) - 1, out_size_t - 1); + memcpy(out + out_size_t - ellipsis_size - 1, kEllipsis, ellipsis_size); + out[out_size_t - 1] = '\0'; + } + return true; +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h b/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h new file mode 100644 index 00000000..c738ac9e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h @@ -0,0 +1,210 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: commandlineflag.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `CommandLineFlag`, which acts as a type-erased +// handle for accessing metadata about the Abseil Flag in question. +// +// Because an actual Abseil flag is of an unspecified type, you should not +// manipulate or interact directly with objects of that type. Instead, use the +// CommandLineFlag type as an intermediary. 
+#ifndef ABSL_FLAGS_COMMANDLINEFLAG_H_ +#define ABSL_FLAGS_COMMANDLINEFLAG_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/fast_type_id.h" +#include "absl/flags/internal/commandlineflag.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + class PrivateHandleAccessor; + } // namespace flags_internal + + // CommandLineFlag + // + // This type acts as a type-erased handle for an instance of an Abseil Flag and + // holds reflection information pertaining to that flag. Use CommandLineFlag to + // access a flag's name, location, help string etc. + // + // To obtain an absl::CommandLineFlag, invoke `absl::FindCommandLineFlag()` + // passing it the flag name string. + // + // Example: + // + // // Obtain reflection handle for a flag named "flagname". + // const absl::CommandLineFlag* my_flag_data = + // absl::FindCommandLineFlag("flagname"); + // + // // Now you can get flag info from that reflection handle. + // std::string flag_location = my_flag_data->Filename(); + // ... + class CommandLineFlag + { + public: + constexpr CommandLineFlag() = default; + + // Not copyable/assignable. + CommandLineFlag(const CommandLineFlag&) = delete; + CommandLineFlag& operator=(const CommandLineFlag&) = delete; + + // absl::CommandLineFlag::IsOfType() + // + // Return true iff flag has type T. + template + inline bool IsOfType() const + { + return TypeId() == base_internal::FastTypeId(); + } + + // absl::CommandLineFlag::TryGet() + // + // Attempts to retrieve the flag value. Returns value on success, + // absl::nullopt otherwise. + template + absl::optional TryGet() const + { + if (IsRetired() || !IsOfType()) + { + return absl::nullopt; + } + + // Implementation notes: + // + // We are wrapping a union around the value of `T` to serve three purposes: + // + // 1. `U.value` has correct size and alignment for a value of type `T` + // 2. 
The `U.value` constructor is not invoked since U's constructor does + // not do it explicitly. + // 3. The `U.value` destructor is invoked since U's destructor does it + // explicitly. This makes `U` a kind of RAII wrapper around non default + // constructible value of T, which is destructed when we leave the + // scope. We do need to destroy U.value, which is constructed by + // CommandLineFlag::Read even though we left it in a moved-from state + // after std::move. + // + // All of this serves to avoid requiring `T` being default constructible. + union U + { + T value; + U() + { + } + ~U() + { + value.~T(); + } + }; + U u; + + Read(&u.value); + // allow retired flags to be "read", so we can report invalid access. + if (IsRetired()) + { + return absl::nullopt; + } + return std::move(u.value); + } + + // absl::CommandLineFlag::Name() + // + // Returns name of this flag. + virtual absl::string_view Name() const = 0; + + // absl::CommandLineFlag::Filename() + // + // Returns name of the file where this flag is defined. + virtual std::string Filename() const = 0; + + // absl::CommandLineFlag::Help() + // + // Returns help message associated with this flag. + virtual std::string Help() const = 0; + + // absl::CommandLineFlag::IsRetired() + // + // Returns true iff this object corresponds to retired flag. + virtual bool IsRetired() const; + + // absl::CommandLineFlag::DefaultValue() + // + // Returns the default value for this flag. + virtual std::string DefaultValue() const = 0; + + // absl::CommandLineFlag::CurrentValue() + // + // Returns the current value for this flag. + virtual std::string CurrentValue() const = 0; + + // absl::CommandLineFlag::ParseFrom() + // + // Sets the value of the flag based on specified string `value`. If the flag + // was successfully set to new value, it returns true. Otherwise, sets `error` + // to indicate the error, leaves the flag unchanged, and returns false. 
+ bool ParseFrom(absl::string_view value, std::string* error); + + protected: + ~CommandLineFlag() = default; + + private: + friend class flags_internal::PrivateHandleAccessor; + + // Sets the value of the flag based on specified string `value`. If the flag + // was successfully set to new value, it returns true. Otherwise, sets `error` + // to indicate the error, leaves the flag unchanged, and returns false. There + // are three ways to set the flag's value: + // * Update the current flag value + // * Update the flag's default value + // * Update the current flag value if it was never set before + // The mode is selected based on `set_mode` parameter. + virtual bool ParseFrom(absl::string_view value, flags_internal::FlagSettingMode set_mode, flags_internal::ValueSource source, std::string& error) = 0; + + // Returns id of the flag's value type. + virtual flags_internal::FlagFastTypeId TypeId() const = 0; + + // Interface to save flag to some persistent state. Returns current flag state + // or nullptr if flag does not support saving and restoring a state. + virtual std::unique_ptr SaveState() = 0; + + // Copy-construct a new value of the flag's type in a memory referenced by + // the dst based on the current flag's value. + virtual void Read(void* dst) const = 0; + + // To be deleted. Used to return true if flag's current value originated from + // command line. + virtual bool IsSpecifiedOnCommandLine() const = 0; + + // Validates supplied value using validator or parseflag routine + virtual bool ValidateInputValue(absl::string_view value) const = 0; + + // Checks that flags default value can be converted to string and back to the + // flag's value type. 
+ virtual void CheckDefaultValueParsingRoundtrip() const = 0; + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_COMMANDLINEFLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/config.h b/CAPI/cpp/grpc/include/absl/flags/config.h new file mode 100644 index 00000000..9565e483 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/config.h @@ -0,0 +1,68 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_CONFIG_H_ +#define ABSL_FLAGS_CONFIG_H_ + +// Determine if we should strip string literals from the Flag objects. +// By default we strip string literals on mobile platforms. +#if !defined(ABSL_FLAGS_STRIP_NAMES) + +#if defined(__ANDROID__) +#define ABSL_FLAGS_STRIP_NAMES 1 + +#elif defined(__APPLE__) +#include +#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE +#define ABSL_FLAGS_STRIP_NAMES 1 +#elif defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED +#define ABSL_FLAGS_STRIP_NAMES 1 +#endif // TARGET_OS_* +#endif + +#endif // !defined(ABSL_FLAGS_STRIP_NAMES) + +#if !defined(ABSL_FLAGS_STRIP_NAMES) +// If ABSL_FLAGS_STRIP_NAMES wasn't set on the command line or above, +// the default is not to strip. +#define ABSL_FLAGS_STRIP_NAMES 0 +#endif + +#if !defined(ABSL_FLAGS_STRIP_HELP) +// By default, if we strip names, we also strip help. 
+#define ABSL_FLAGS_STRIP_HELP ABSL_FLAGS_STRIP_NAMES +#endif + +// These macros represent the "source of truth" for the list of supported +// built-in types. +#define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ + A(bool, bool) \ + A(short, short) \ + A(unsigned short, unsigned_short) \ + A(int, int) \ + A(unsigned int, unsigned_int) \ + A(long, long) \ + A(unsigned long, unsigned_long) \ + A(long long, long_long) \ + A(unsigned long long, unsigned_long_long) \ + A(double, double) \ + A(float, float) + +#define ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(A) \ + ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ + A(std::string, std_string) \ + A(std::vector, std_vector_of_string) + +#endif // ABSL_FLAGS_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/declare.h b/CAPI/cpp/grpc/include/absl/flags/declare.h new file mode 100644 index 00000000..d9d50faf --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/declare.h @@ -0,0 +1,77 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: declare.h +// ----------------------------------------------------------------------------- +// +// This file defines the ABSL_DECLARE_FLAG macro, allowing you to declare an +// `absl::Flag` for use within a translation unit. You should place this +// declaration within the header file associated with the .cc file that defines +// and owns the `Flag`. 
+ +#ifndef ABSL_FLAGS_DECLARE_H_ +#define ABSL_FLAGS_DECLARE_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // absl::Flag represents a flag of type 'T' created by ABSL_FLAG. + template + class Flag; + + } // namespace flags_internal + +// Flag +// +// Forward declaration of the `absl::Flag` type for use in defining the macro. +#if defined(_MSC_VER) && !defined(__clang__) + template + class Flag; +#else + template + using Flag = flags_internal::Flag; +#endif + + ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_DECLARE_FLAG() +// +// This macro is a convenience for declaring use of an `absl::Flag` within a +// translation unit. This macro should be used within a header file to +// declare usage of the flag within any .cc file including that header file. +// +// The ABSL_DECLARE_FLAG(type, name) macro expands to: +// +// extern absl::Flag FLAGS_name; +#define ABSL_DECLARE_FLAG(type, name) ABSL_DECLARE_FLAG_INTERNAL(type, name) + +// Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its +// arguments. Clients must use ABSL_DECLARE_FLAG instead. +#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ + extern absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ \ + { \ + } \ + /* second redeclaration is to allow applying attributes */ \ + extern absl::Flag FLAGS_##name + +#endif // ABSL_FLAGS_DECLARE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/flag.h b/CAPI/cpp/grpc/include/absl/flags/flag.h new file mode 100644 index 00000000..7ce03a4a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/flag.h @@ -0,0 +1,323 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flag.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::Flag` type for holding command-line +// flag data, and abstractions to create, get and set such flag data. +// +// It is important to note that this type is **unspecified** (an implementation +// detail) and you do not construct or manipulate actual `absl::Flag` +// instances. Instead, you define and declare flags using the +// `ABSL_FLAG()` and `ABSL_DECLARE_FLAG()` macros, and get and set flag values +// using the `absl::GetFlag()` and `absl::SetFlag()` functions. + +#ifndef ABSL_FLAGS_FLAG_H_ +#define ABSL_FLAGS_FLAG_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/flags/config.h" +#include "absl/flags/internal/flag.h" +#include "absl/flags/internal/registry.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + +// Flag +// +// An `absl::Flag` holds a command-line flag value, providing a runtime +// parameter to a binary. Such flags should be defined in the global namespace +// and (preferably) in the module containing the binary's `main()` function. 
+// +// You should not construct and cannot use the `absl::Flag` type directly; +// instead, you should declare flags using the `ABSL_DECLARE_FLAG()` macro +// within a header file, and define your flag using `ABSL_FLAG()` within your +// header's associated `.cc` file. Such flags will be named `FLAGS_name`. +// +// Example: +// +// .h file +// +// // Declares usage of a flag named "FLAGS_count" +// ABSL_DECLARE_FLAG(int, count); +// +// .cc file +// +// // Defines a flag named "FLAGS_count" with a default `int` value of 0. +// ABSL_FLAG(int, count, 0, "Count of items to process"); +// +// No public methods of `absl::Flag` are part of the Abseil Flags API. +// +// For type support of Abseil Flags, see the marshalling.h header file, which +// discusses supported standard types, optional flags, and additional Abseil +// type support. +#if !defined(_MSC_VER) || defined(__clang__) + template + using Flag = flags_internal::Flag; +#else +#include "absl/flags/internal/flag_msvc.inc" +#endif + + // GetFlag() + // + // Returns the value (of type `T`) of an `absl::Flag` instance, by value. Do + // not construct an `absl::Flag` directly and call `absl::GetFlag()`; + // instead, refer to flag's constructed variable name (e.g. `FLAGS_name`). + // Because this function returns by value and not by reference, it is + // thread-safe, but note that the operation may be expensive; as a result, avoid + // `absl::GetFlag()` within any tight loops. + // + // Example: + // + // // FLAGS_count is a Flag of type `int` + // int my_count = absl::GetFlag(FLAGS_count); + // + // // FLAGS_firstname is a Flag of type `std::string` + // std::string first_name = absl::GetFlag(FLAGS_firstname); + template + ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag& flag) + { + return flags_internal::FlagImplPeer::InvokeGet(flag); + } + + // SetFlag() + // + // Sets the value of an `absl::Flag` to the value `v`. 
Do not construct an + // `absl::Flag` directly and call `absl::SetFlag()`; instead, use the + // flag's variable name (e.g. `FLAGS_name`). This function is + // thread-safe, but is potentially expensive. Avoid setting flags in general, + // but especially within performance-critical code. + template + void SetFlag(absl::Flag* flag, const T& v) + { + flags_internal::FlagImplPeer::InvokeSet(*flag, v); + } + + // Overload of `SetFlag()` to allow callers to pass in a value that is + // convertible to `T`. E.g., use this overload to pass a "const char*" when `T` + // is `std::string`. + template + void SetFlag(absl::Flag* flag, const V& v) + { + T value(v); + flags_internal::FlagImplPeer::InvokeSet(*flag, value); + } + + // GetFlagReflectionHandle() + // + // Returns the reflection handle corresponding to specified Abseil Flag + // instance. Use this handle to access flag's reflection information, like name, + // location, default value etc. + // + // Example: + // + // std::string = absl::GetFlagReflectionHandle(FLAGS_count).DefaultValue(); + + template + const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag& f) + { + return flags_internal::FlagImplPeer::InvokeReflect(f); + } + + ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_FLAG() +// +// This macro defines an `absl::Flag` instance of a specified type `T`: +// +// ABSL_FLAG(T, name, default_value, help); +// +// where: +// +// * `T` is a supported flag type (see the list of types in `marshalling.h`), +// * `name` designates the name of the flag (as a global variable +// `FLAGS_name`), +// * `default_value` is an expression holding the default value for this flag +// (which must be implicitly convertible to `T`), +// * `help` is the help text, which can also be an expression. +// +// This macro expands to a flag named 'FLAGS_name' of type 'T': +// +// absl::Flag FLAGS_name = ...; +// +// Note that all such instances are created as global variables. 
+// +// For `ABSL_FLAG()` values that you wish to expose to other translation units, +// it is recommended to define those flags within the `.cc` file associated with +// the header where the flag is declared. +// +// Note: do not construct objects of type `absl::Flag` directly. Only use the +// `ABSL_FLAG()` macro for such construction. +#define ABSL_FLAG(Type, name, default_value, help) \ + ABSL_FLAG_IMPL(Type, name, default_value, help) + +// ABSL_FLAG().OnUpdate() +// +// Defines a flag of type `T` with a callback attached: +// +// ABSL_FLAG(T, name, default_value, help).OnUpdate(callback); +// +// `callback` should be convertible to `void (*)()`. +// +// After any setting of the flag value, the callback will be called at least +// once. A rapid sequence of changes may be merged together into the same +// callback. No concurrent calls to the callback will be made for the same +// flag. Callbacks are allowed to read the current value of the flag but must +// not mutate that flag. +// +// The update mechanism guarantees "eventual consistency"; if the callback +// derives an auxiliary data structure from the flag value, it is guaranteed +// that eventually the flag value and the derived data structure will be +// consistent. +// +// Note: ABSL_FLAG.OnUpdate() does not have a public definition. Hence, this +// comment serves as its API documentation. 
+ +// ----------------------------------------------------------------------------- +// Implementation details below this section +// ----------------------------------------------------------------------------- + +// ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_NAMES +#if !defined(_MSC_VER) || defined(__clang__) +#define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag +#define ABSL_FLAG_IMPL_HELP_ARG(name) \ + absl::flags_internal::HelpArg( \ + FLAGS_help_storage_##name \ + ) +#define ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name) \ + absl::flags_internal::DefaultArg(0) +#else +#define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag.GetImpl() +#define ABSL_FLAG_IMPL_HELP_ARG(name) &AbslFlagHelpGenFor##name::NonConst +#define ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name) &AbslFlagDefaultGenFor##name::Gen +#endif + +#if ABSL_FLAGS_STRIP_NAMES +#define ABSL_FLAG_IMPL_FLAGNAME(txt) "" +#define ABSL_FLAG_IMPL_FILENAME() "" +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), nullptr) +#else +#define ABSL_FLAG_IMPL_FLAGNAME(txt) txt +#define ABSL_FLAG_IMPL_FILENAME() __FILE__ +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), __FILE__) +#endif + +// ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP + +#if ABSL_FLAGS_STRIP_HELP +#define ABSL_FLAG_IMPL_FLAGHELP(txt) absl::flags_internal::kStrippedFlagHelp +#else +#define ABSL_FLAG_IMPL_FLAGHELP(txt) txt +#endif + +// AbslFlagHelpGenFor##name is used to encapsulate both immediate (method Const) +// and lazy (method NonConst) evaluation of help message expression. We choose +// between the two via the call to HelpArg in absl::Flag instantiation below. +// If help message expression is constexpr evaluable compiler will optimize +// away this whole struct. +// TODO(rogeeff): place these generated structs into local namespace and apply +// ABSL_INTERNAL_UNIQUE_SHORT_NAME. 
+// TODO(rogeeff): Apply __attribute__((nodebug)) to FLAGS_help_storage_##name +#define ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, txt) \ + struct AbslFlagHelpGenFor##name \ + { \ + /* The expression is run in the caller as part of the */ \ + /* default value argument. That keeps temporaries alive */ \ + /* long enough for NonConst to work correctly. */ \ + static constexpr absl::string_view Value( \ + absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt) \ + ) \ + { \ + return absl_flag_help; \ + } \ + static std::string NonConst() \ + { \ + return std::string(Value()); \ + } \ + }; \ + constexpr auto FLAGS_help_storage_##name ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + ABSL_ATTRIBUTE_SECTION_VARIABLE(flags_help_cold) = \ + absl::flags_internal::HelpStringAsArray( \ + 0 \ + ); + +#define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ + struct AbslFlagDefaultGenFor##name \ + { \ + Type value = absl::flags_internal::InitDefaultValue(default_value); \ + static void Gen(void* absl_flag_default_loc) \ + { \ + new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value); \ + } \ + }; + +// ABSL_FLAG_IMPL +// +// Note: Name of registrar object is not arbitrary. It is used to "grab" +// global name for FLAGS_no symbol, thus preventing the possibility +// of defining two flags with names foo and nofoo. 
+#define ABSL_FLAG_IMPL(Type, name, default_value, help) \ + extern ::absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ \ + { \ + } \ + ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ + ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \ + ABSL_CONST_INIT absl::Flag FLAGS_##name{ \ + ABSL_FLAG_IMPL_FLAGNAME(#name), ABSL_FLAG_IMPL_FILENAME(), ABSL_FLAG_IMPL_HELP_ARG(name), ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name)}; \ + extern absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name; \ + absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name = \ + ABSL_FLAG_IMPL_REGISTRAR(Type, FLAGS_##name) + +// ABSL_RETIRED_FLAG +// +// Designates the flag (which is usually pre-existing) as "retired." A retired +// flag is a flag that is now unused by the program, but may still be passed on +// the command line, usually by production scripts. A retired flag is ignored +// and code can't access it at runtime. +// +// This macro registers a retired flag with given name and type, with a name +// identical to the name of the original flag you are retiring. The retired +// flag's type can change over time, so that you can retire code to support a +// custom flag type. +// +// This macro has the same signature as `ABSL_FLAG`. To retire a flag, simply +// replace an `ABSL_FLAG` definition with `ABSL_RETIRED_FLAG`, leaving the +// arguments unchanged (unless of course you actually want to retire the flag +// type at this time as well). +// +// `default_value` is only used as a double check on the type. `explanation` is +// unused. +// TODO(rogeeff): replace RETIRED_FLAGS with FLAGS once forward declarations of +// retired flags are cleaned up. 
+#define ABSL_RETIRED_FLAG(type, name, default_value, explanation) \ + static absl::flags_internal::RetiredFlag RETIRED_FLAGS_##name; \ + ABSL_ATTRIBUTE_UNUSED static const auto RETIRED_FLAGS_REG_##name = \ + (RETIRED_FLAGS_##name.Retire(#name), \ + ::absl::flags_internal::FlagRegistrarEmpty{}) + +#endif // ABSL_FLAGS_FLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h b/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h new file mode 100644 index 00000000..b05099ef --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h @@ -0,0 +1,73 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_ +#define ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_ + +#include "absl/base/config.h" +#include "absl/base/internal/fast_type_id.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // An alias for flag fast type id. This value identifies the flag value type + // similarly to typeid(T), without relying on RTTI being available. In most + // cases this id is enough to uniquely identify the flag's value type. In a few + // cases we'll have to resort to using actual RTTI implementation if it is + // available. + using FlagFastTypeId = absl::base_internal::FastTypeIdType; + + // Options that control SetCommandLineOptionWithMode. 
+ enum FlagSettingMode + { + // update the flag's value unconditionally (can call this multiple times). + SET_FLAGS_VALUE, + // update the flag's value, but *only if* it has not yet been updated + // with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef". + SET_FLAG_IF_DEFAULT, + // set the flag's default value to this. If the flag has not been updated + // yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef") + // change the flag's current value to the new default value as well. + SET_FLAGS_DEFAULT + }; + + // Options that control ParseFrom: Source of a value. + enum ValueSource + { + // Flag is being set by value specified on a command line. + kCommandLine, + // Flag is being set by value specified in the code. + kProgrammaticChange, + }; + + // Handle to FlagState objects. Specific flag state objects will restore state + // of a flag produced this flag state from method CommandLineFlag::SaveState(). + class FlagStateInterface + { + public: + virtual ~FlagStateInterface(); + + // Restores the flag originated this object to the saved state. + virtual void Restore() const = 0; + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/flag.h b/CAPI/cpp/grpc/include/absl/flags/internal/flag.h new file mode 100644 index 00000000..04be78ae --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/flag.h @@ -0,0 +1,917 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_FLAG_H_ +#define ABSL_FLAGS_INTERNAL_FLAG_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/call_once.h" +#include "absl/base/casts.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/base/thread_annotations.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/config.h" +#include "absl/flags/internal/commandlineflag.h" +#include "absl/flags/internal/registry.h" +#include "absl/flags/internal/sequence_lock.h" +#include "absl/flags/marshalling.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + /////////////////////////////////////////////////////////////////////////////// + // Forward declaration of absl::Flag public API. 
+ namespace flags_internal + { + template + class Flag; + } // namespace flags_internal + +#if defined(_MSC_VER) && !defined(__clang__) + template + class Flag; +#else + template + using Flag = flags_internal::Flag; +#endif + + template + ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag& flag); + + template + void SetFlag(absl::Flag* flag, const T& v); + + template + void SetFlag(absl::Flag* flag, const V& v); + + template + const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag& f); + + /////////////////////////////////////////////////////////////////////////////// + // Flag value type operations, eg., parsing, copying, etc. are provided + // by function specific to that type with a signature matching FlagOpFn. + + namespace flags_internal + { + + enum class FlagOp + { + kAlloc, + kDelete, + kCopy, + kCopyConstruct, + kSizeof, + kFastTypeId, + kRuntimeTypeId, + kParse, + kUnparse, + kValueOffset, + }; + using FlagOpFn = void* (*)(FlagOp, const void*, void*, void*); + + // Forward declaration for Flag value specific operations. + template + void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3); + + // Allocate aligned memory for a flag value. + inline void* Alloc(FlagOpFn op) + { + return op(FlagOp::kAlloc, nullptr, nullptr, nullptr); + } + // Deletes memory interpreting obj as flag value type pointer. + inline void Delete(FlagOpFn op, void* obj) + { + op(FlagOp::kDelete, nullptr, obj, nullptr); + } + // Copies src to dst interpreting as flag value type pointers. + inline void Copy(FlagOpFn op, const void* src, void* dst) + { + op(FlagOp::kCopy, src, dst, nullptr); + } + // Construct a copy of flag value in a location pointed by dst + // based on src - pointer to the flag's value. + inline void CopyConstruct(FlagOpFn op, const void* src, void* dst) + { + op(FlagOp::kCopyConstruct, src, dst, nullptr); + } + // Makes a copy of flag value pointed by obj. 
+ inline void* Clone(FlagOpFn op, const void* obj) + { + void* res = flags_internal::Alloc(op); + flags_internal::CopyConstruct(op, obj, res); + return res; + } + // Returns true if parsing of input text is successful. + inline bool Parse(FlagOpFn op, absl::string_view text, void* dst, std::string* error) + { + return op(FlagOp::kParse, &text, dst, error) != nullptr; + } + // Returns string representing supplied value. + inline std::string Unparse(FlagOpFn op, const void* val) + { + std::string result; + op(FlagOp::kUnparse, val, &result, nullptr); + return result; + } + // Returns size of flag value type. + inline size_t Sizeof(FlagOpFn op) + { + // This sequence of casts reverses the sequence from + // `flags_internal::FlagOps()` + return static_cast(reinterpret_cast( + op(FlagOp::kSizeof, nullptr, nullptr, nullptr) + )); + } + // Returns fast type id corresponding to the value type. + inline FlagFastTypeId FastTypeId(FlagOpFn op) + { + return reinterpret_cast( + op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr) + ); + } + // Returns fast type id corresponding to the value type. + inline const std::type_info* RuntimeTypeId(FlagOpFn op) + { + return reinterpret_cast( + op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr) + ); + } + // Returns offset of the field value_ from the field impl_ inside of + // absl::Flag data. Given FlagImpl pointer p you can get the + // location of the corresponding value as: + // reinterpret_cast(p) + ValueOffset(). + inline ptrdiff_t ValueOffset(FlagOpFn op) + { + // This sequence of casts reverses the sequence from + // `flags_internal::FlagOps()` + return static_cast(reinterpret_cast( + op(FlagOp::kValueOffset, nullptr, nullptr, nullptr) + )); + } + + // Returns an address of RTTI's typeid(T). 
+ template + inline const std::type_info* GenRuntimeTypeId() + { +#ifdef ABSL_INTERNAL_HAS_RTTI + return &typeid(T); +#else + return nullptr; +#endif + } + + /////////////////////////////////////////////////////////////////////////////// + // Flag help auxiliary structs. + + // This is help argument for absl::Flag encapsulating the string literal pointer + // or pointer to function generating it as well as enum descriminating two + // cases. + using HelpGenFunc = std::string (*)(); + + template + struct FixedCharArray + { + char value[N]; + + template + static constexpr FixedCharArray FromLiteralString( + absl::string_view str, absl::index_sequence + ) + { + return (void)str, FixedCharArray({{str[I]..., '\0'}}); + } + }; + + template + constexpr FixedCharArray HelpStringAsArray(int) + { + return FixedCharArray::FromLiteralString( + Gen::Value(), absl::make_index_sequence{} + ); + } + + template + constexpr std::false_type HelpStringAsArray(char) + { + return std::false_type{}; + } + + union FlagHelpMsg + { + constexpr explicit FlagHelpMsg(const char* help_msg) : + literal(help_msg) + { + } + constexpr explicit FlagHelpMsg(HelpGenFunc help_gen) : + gen_func(help_gen) + { + } + + const char* literal; + HelpGenFunc gen_func; + }; + + enum class FlagHelpKind : uint8_t + { + kLiteral = 0, + kGenFunc = 1 + }; + + struct FlagHelpArg + { + FlagHelpMsg source; + FlagHelpKind kind; + }; + + extern const char kStrippedFlagHelp[]; + + // These two HelpArg overloads allows us to select at compile time one of two + // way to pass Help argument to absl::Flag. We'll be passing + // AbslFlagHelpGenFor##name as Gen and integer 0 as a single argument to prefer + // first overload if possible. If help message is evaluatable on constexpr + // context We'll be able to make FixedCharArray out of it and we'll choose first + // overload. In this case the help message expression is immediately evaluated + // and is used to construct the absl::Flag. 
No additional code is generated by + // ABSL_FLAG Otherwise SFINAE kicks in and first overload is dropped from the + // consideration, in which case the second overload will be used. The second + // overload does not attempt to evaluate the help message expression + // immediately and instead delays the evaluation by returning the function + // pointer (&T::NonConst) generating the help message when necessary. This is + // evaluatable in constexpr context, but the cost is an extra function being + // generated in the ABSL_FLAG code. + template + constexpr FlagHelpArg HelpArg(const FixedCharArray& value) + { + return {FlagHelpMsg(value.value), FlagHelpKind::kLiteral}; + } + + template + constexpr FlagHelpArg HelpArg(std::false_type) + { + return {FlagHelpMsg(&Gen::NonConst), FlagHelpKind::kGenFunc}; + } + + /////////////////////////////////////////////////////////////////////////////// + // Flag default value auxiliary structs. + + // Signature for the function generating the initial flag value (usually + // based on default value supplied in flag's definition) + using FlagDfltGenFunc = void (*)(void*); + + union FlagDefaultSrc + { + constexpr explicit FlagDefaultSrc(FlagDfltGenFunc gen_func_arg) : + gen_func(gen_func_arg) + { + } + +#define ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE(T, name) \ + T name##_value; \ + constexpr explicit FlagDefaultSrc(T value) : name##_value(value) \ + { \ + } // NOLINT + ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE) +#undef ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE + + void* dynamic_value; + FlagDfltGenFunc gen_func; + }; + + enum class FlagDefaultKind : uint8_t + { + kDynamicValue = 0, + kGenFunc = 1, + kOneWord = 2 // for default values UP to one word in size + }; + + struct FlagDefaultArg + { + FlagDefaultSrc source; + FlagDefaultKind kind; + }; + + // This struct and corresponding overload to InitDefaultValue are used to + // facilitate usage of {} as default value in ABSL_FLAG macro. 
+ // TODO(rogeeff): Fix handling types with explicit constructors. + struct EmptyBraces + { + }; + + template + constexpr T InitDefaultValue(T t) + { + return t; + } + + template + constexpr T InitDefaultValue(EmptyBraces) + { + return T{}; + } + + template::value, int>::type = ((void)GenT{}, 0)> + constexpr FlagDefaultArg DefaultArg(int) + { + return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord}; + } + + template + constexpr FlagDefaultArg DefaultArg(char) + { + return {FlagDefaultSrc(&GenT::Gen), FlagDefaultKind::kGenFunc}; + } + + /////////////////////////////////////////////////////////////////////////////// + // Flag current value auxiliary structs. + + constexpr int64_t UninitializedFlagValue() + { + return static_cast(0xababababababababll); + } + + template + using FlagUseValueAndInitBitStorage = + std::integral_constant::value && std::is_default_constructible::value && (sizeof(T) < 8)>; + + template + using FlagUseOneWordStorage = + std::integral_constant::value && (sizeof(T) <= 8)>; + + template + using FlagUseSequenceLockStorage = + std::integral_constant::value && (sizeof(T) > 8)>; + + enum class FlagValueStorageKind : uint8_t + { + kValueAndInitBit = 0, + kOneWordAtomic = 1, + kSequenceLocked = 2, + kAlignedBuffer = 3, + }; + + template + static constexpr FlagValueStorageKind StorageKind() + { + return FlagUseValueAndInitBitStorage::value ? FlagValueStorageKind::kValueAndInitBit : FlagUseOneWordStorage::value ? FlagValueStorageKind::kOneWordAtomic : + FlagUseSequenceLockStorage::value ? FlagValueStorageKind::kSequenceLocked : + FlagValueStorageKind::kAlignedBuffer; + } + + struct FlagOneWordValue + { + constexpr explicit FlagOneWordValue(int64_t v) : + value(v) + { + } + std::atomic value; + }; + + template + struct alignas(8) FlagValueAndInitBit + { + T value; + // Use an int instead of a bool to guarantee that a non-zero value has + // a bit set. 
+ uint8_t init; + }; + + template()> + struct FlagValue; + + template + struct FlagValue : FlagOneWordValue + { + constexpr FlagValue() : + FlagOneWordValue(0) + { + } + bool Get(const SequenceLock&, T& dst) const + { + int64_t storage = value.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(storage == 0)) + { + return false; + } + dst = absl::bit_cast>(storage).value; + return true; + } + }; + + template + struct FlagValue : FlagOneWordValue + { + constexpr FlagValue() : + FlagOneWordValue(UninitializedFlagValue()) + { + } + bool Get(const SequenceLock&, T& dst) const + { + int64_t one_word_val = value.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) + { + return false; + } + std::memcpy(&dst, static_cast(&one_word_val), sizeof(T)); + return true; + } + }; + + template + struct FlagValue + { + bool Get(const SequenceLock& lock, T& dst) const + { + return lock.TryRead(&dst, value_words, sizeof(T)); + } + + static constexpr int kNumWords = + flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t); + + alignas(T) alignas( + std::atomic + ) std::atomic value_words[kNumWords]; + }; + + template + struct FlagValue + { + bool Get(const SequenceLock&, T&) const + { + return false; + } + + alignas(T) char value[sizeof(T)]; + }; + + /////////////////////////////////////////////////////////////////////////////// + // Flag callback auxiliary structs. + + // Signature for the mutation callback used by watched Flags + // The callback is noexcept. + // TODO(rogeeff): add noexcept after C++17 support is added. + using FlagCallbackFunc = void (*)(); + + struct FlagCallback + { + FlagCallbackFunc func; + absl::Mutex guard; // Guard for concurrent callback invocations. + }; + + /////////////////////////////////////////////////////////////////////////////// + // Flag implementation, which does not depend on flag value type. + // The class encapsulates the Flag's data and access to it. 
+ + struct DynValueDeleter + { + explicit DynValueDeleter(FlagOpFn op_arg = nullptr); + void operator()(void* ptr) const; + + FlagOpFn op; + }; + + class FlagState; + + class FlagImpl final : public CommandLineFlag + { + public: + constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op, FlagHelpArg help, FlagValueStorageKind value_kind, FlagDefaultArg default_arg) : + name_(name), + filename_(filename), + op_(op), + help_(help.source), + help_source_kind_(static_cast(help.kind)), + value_storage_kind_(static_cast(value_kind)), + def_kind_(static_cast(default_arg.kind)), + modified_(false), + on_command_line_(false), + callback_(nullptr), + default_value_(default_arg.source), + data_guard_{} + { + } + + // Constant access methods + int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(*DataGuard()); + bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(*DataGuard()); + void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + void Read(bool* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) + { + *value = ReadOneBool(); + } + template() == FlagValueStorageKind::kOneWordAtomic, int> = 0> + void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) + { + int64_t v = ReadOneWord(); + std::memcpy(value, static_cast(&v), sizeof(T)); + } + template() == FlagValueStorageKind::kValueAndInitBit, int>::type = 0> + void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) + { + *value = absl::bit_cast>(ReadOneWord()).value; + } + + // Mutating access methods + void Write(const void* src) ABSL_LOCKS_EXCLUDED(*DataGuard()); + + // Interfaces to operate on callbacks. + void SetCallback(const FlagCallbackFunc mutation_callback) + ABSL_LOCKS_EXCLUDED(*DataGuard()); + void InvokeCallback() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + + // Used in read/write operations to validate source/target has correct type. 
+ // For example if flag is declared as absl::Flag FLAGS_foo, a call to + // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed + // int. To do that we pass the "assumed" type id (which is deduced from type + // int) as an argument `type_id`, which is in turn is validated against the + // type id stored in flag object by flag definition statement. + void AssertValidType(FlagFastTypeId type_id, const std::type_info* (*gen_rtti)()) const; + + private: + template + friend class Flag; + friend class FlagState; + + // Ensures that `data_guard_` is initialized and returns it. + absl::Mutex* DataGuard() const + ABSL_LOCK_RETURNED(reinterpret_cast(data_guard_)); + // Returns heap allocated value of type T initialized with default value. + std::unique_ptr MakeInitValue() const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + // Flag initialization called via absl::call_once. + void Init(); + + // Offset value access methods. One per storage kind. These methods to not + // respect const correctness, so be very carefull using them. + + // This is a shared helper routine which encapsulates most of the magic. Since + // it is only used inside the three routines below, which are defined in + // flag.cc, we can define it in that file as well. + template + StorageT* OffsetValue() const; + // This is an accessor for a value stored in an aligned buffer storage + // used for non-trivially-copyable data types. + // Returns a mutable pointer to the start of a buffer. + void* AlignedBufferValue() const; + + // The same as above, but used for sequencelock-protected storage. + std::atomic* AtomicBufferValue() const; + + // This is an accessor for a value stored as one word atomic. Returns a + // mutable reference to an atomic value. + std::atomic& OneWordValue() const; + + // Attempts to parse supplied `value` string. If parsing is successful, + // returns new value. Otherwise returns nullptr. 
+ std::unique_ptr TryParse(absl::string_view value, std::string& err) const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + // Stores the flag value based on the pointer to the source. + void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + + // Copy the flag data, protected by `seq_lock_` into `dst`. + // + // REQUIRES: ValueStorageKind() == kSequenceLocked. + void ReadSequenceLockedData(void* dst) const + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + FlagHelpKind HelpSourceKind() const + { + return static_cast(help_source_kind_); + } + FlagValueStorageKind ValueStorageKind() const + { + return static_cast(value_storage_kind_); + } + FlagDefaultKind DefaultKind() const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()) + { + return static_cast(def_kind_); + } + + // CommandLineFlag interface implementation + absl::string_view Name() const override; + std::string Filename() const override; + std::string Help() const override; + FlagFastTypeId TypeId() const override; + bool IsSpecifiedOnCommandLine() const override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + std::string DefaultValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + std::string CurrentValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + bool ValidateInputValue(absl::string_view value) const override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + void CheckDefaultValueParsingRoundtrip() const override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + + // Interfaces to save and restore flags to/from persistent state. + // Returns current flag state or nullptr if flag does not support + // saving and restoring a state. + std::unique_ptr SaveState() override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + // Restores the flag state to the supplied state object. If there is + // nothing to restore returns false. Otherwise returns true. 
+ bool RestoreState(const FlagState& flag_state) + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + bool ParseFrom(absl::string_view value, FlagSettingMode set_mode, ValueSource source, std::string& error) override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + // Immutable flag's state. + + // Flags name passed to ABSL_FLAG as second arg. + const char* const name_; + // The file name where ABSL_FLAG resides. + const char* const filename_; + // Type-specific operations "vtable". + const FlagOpFn op_; + // Help message literal or function to generate it. + const FlagHelpMsg help_; + // Indicates if help message was supplied as literal or generator func. + const uint8_t help_source_kind_ : 1; + // Kind of storage this flag is using for the flag's value. + const uint8_t value_storage_kind_ : 2; + + uint8_t : 0; // The bytes containing the const bitfields must not be + // shared with bytes containing the mutable bitfields. + + // Mutable flag's state (guarded by `data_guard_`). + + // def_kind_ is not guard by DataGuard() since it is accessed in Init without + // locks. + uint8_t def_kind_ : 2; + // Has this flag's value been modified? + bool modified_ : 1 ABSL_GUARDED_BY(*DataGuard()); + // Has this flag been specified on command line. + bool on_command_line_ : 1 ABSL_GUARDED_BY(*DataGuard()); + + // Unique tag for absl::call_once call to initialize this flag. + absl::once_flag init_control_; + + // Sequence lock / mutation counter. + flags_internal::SequenceLock seq_lock_; + + // Optional flag's callback and absl::Mutex to guard the invocations. + FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard()); + // Either a pointer to the function generating the default value based on the + // value specified in ABSL_FLAG or pointer to the dynamically set default + // value via SetCommandLineOptionWithMode. def_kind_ is used to distinguish + // these two cases. + FlagDefaultSrc default_value_; + + // This is reserved space for an absl::Mutex to guard flag data. 
It will be + // initialized in FlagImpl::Init via placement new. + // We can't use "absl::Mutex data_guard_", since this class is not literal. + // We do not want to use "absl::Mutex* data_guard_", since this would require + // heap allocation during initialization, which is both slows program startup + // and can fail. Using reserved space + placement new allows us to avoid both + // problems. + alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)]; + }; + + /////////////////////////////////////////////////////////////////////////////// + // The Flag object parameterized by the flag's value type. This class implements + // flag reflection handle interface. + + template + class Flag + { + public: + constexpr Flag(const char* name, const char* filename, FlagHelpArg help, const FlagDefaultArg default_arg) : + impl_(name, filename, &FlagOps, help, flags_internal::StorageKind(), default_arg), + value_() + { + } + + // CommandLineFlag interface + absl::string_view Name() const + { + return impl_.Name(); + } + std::string Filename() const + { + return impl_.Filename(); + } + std::string Help() const + { + return impl_.Help(); + } + // Do not use. To be removed. + bool IsSpecifiedOnCommandLine() const + { + return impl_.IsSpecifiedOnCommandLine(); + } + std::string DefaultValue() const + { + return impl_.DefaultValue(); + } + std::string CurrentValue() const + { + return impl_.CurrentValue(); + } + + private: + template + friend class FlagRegistrar; + friend class FlagImplPeer; + + T Get() const + { + // See implementation notes in CommandLineFlag::Get(). 
+ union U + { + T value; + U() + { + } + ~U() + { + value.~T(); + } + }; + U u; + +#if !defined(NDEBUG) + impl_.AssertValidType(base_internal::FastTypeId(), &GenRuntimeTypeId); +#endif + + if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) + { + impl_.Read(&u.value); + } + return std::move(u.value); + } + void Set(const T& v) + { + impl_.AssertValidType(base_internal::FastTypeId(), &GenRuntimeTypeId); + impl_.Write(&v); + } + + // Access to the reflection. + const CommandLineFlag& Reflect() const + { + return impl_; + } + + // Flag's data + // The implementation depends on value_ field to be placed exactly after the + // impl_ field, so that impl_ can figure out the offset to the value and + // access it. + FlagImpl impl_; + FlagValue value_; + }; + + /////////////////////////////////////////////////////////////////////////////// + // Trampoline for friend access + + class FlagImplPeer + { + public: + template + static T InvokeGet(const FlagType& flag) + { + return flag.Get(); + } + template + static void InvokeSet(FlagType& flag, const T& v) + { + flag.Set(v); + } + template + static const CommandLineFlag& InvokeReflect(const FlagType& f) + { + return f.Reflect(); + } + }; + + /////////////////////////////////////////////////////////////////////////////// + // Implementation of Flag value specific operations routine. 
+ template + void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) + { + switch (op) + { + case FlagOp::kAlloc: + { + std::allocator alloc; + return std::allocator_traits>::allocate(alloc, 1); + } + case FlagOp::kDelete: + { + T* p = static_cast(v2); + p->~T(); + std::allocator alloc; + std::allocator_traits>::deallocate(alloc, p, 1); + return nullptr; + } + case FlagOp::kCopy: + *static_cast(v2) = *static_cast(v1); + return nullptr; + case FlagOp::kCopyConstruct: + new (v2) T(*static_cast(v1)); + return nullptr; + case FlagOp::kSizeof: + return reinterpret_cast(static_cast(sizeof(T))); + case FlagOp::kFastTypeId: + return const_cast(base_internal::FastTypeId()); + case FlagOp::kRuntimeTypeId: + return const_cast(GenRuntimeTypeId()); + case FlagOp::kParse: + { + // Initialize the temporary instance of type T based on current value in + // destination (which is going to be flag's default value). + T temp(*static_cast(v2)); + if (!absl::ParseFlag(*static_cast(v1), &temp, static_cast(v3))) + { + return nullptr; + } + *static_cast(v2) = std::move(temp); + return v2; + } + case FlagOp::kUnparse: + *static_cast(v2) = + absl::UnparseFlag(*static_cast(v1)); + return nullptr; + case FlagOp::kValueOffset: + { + // Round sizeof(FlagImp) to a multiple of alignof(FlagValue) to get the + // offset of the data. 
+ size_t round_to = alignof(FlagValue); + size_t offset = + (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; + return reinterpret_cast(offset); + } + } + return nullptr; + } + + /////////////////////////////////////////////////////////////////////////////// + // This class facilitates Flag object registration and tail expression-based + // flag definition, for example: + // ABSL_FLAG(int, foo, 42, "Foo help").OnUpdate(NotifyFooWatcher); + struct FlagRegistrarEmpty + { + }; + template + class FlagRegistrar + { + public: + explicit FlagRegistrar(Flag& flag, const char* filename) : + flag_(flag) + { + if (do_register) + flags_internal::RegisterCommandLineFlag(flag_.impl_, filename); + } + + FlagRegistrar OnUpdate(FlagCallbackFunc cb) && + { + flag_.impl_.SetCallback(cb); + return *this; + } + + // Make the registrar "die" gracefully as an empty struct on a line where + // registration happens. Registrar objects are intended to live only as + // temporary. + operator FlagRegistrarEmpty() const + { + return {}; + } // NOLINT + + private: + Flag& flag_; // Flag being registered (not owned). + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_FLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/flag_msvc.inc b/CAPI/cpp/grpc/include/absl/flags/internal/flag_msvc.inc new file mode 100644 index 00000000..614d09fd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/flag_msvc.inc @@ -0,0 +1,116 @@ +// +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Do not include this file directly. +// Include absl/flags/flag.h instead. + +// MSVC debug builds do not implement initialization with constexpr constructors +// correctly. To work around this we add a level of indirection, so that the +// class `absl::Flag` contains an `internal::Flag*` (instead of being an alias +// to that class) and dynamically allocates an instance when necessary. We also +// forward all calls to internal::Flag methods via trampoline methods. In this +// setup the `absl::Flag` class does not have constructor and virtual methods, +// all the data members are public and thus MSVC is able to initialize it at +// link time. To deal with multiple threads accessing the flag for the first +// time concurrently we use an atomic boolean indicating if flag object is +// initialized. We also employ the double-checked locking pattern where the +// second level of protection is a global Mutex, so if two threads attempt to +// construct the flag concurrently only one wins. +// +// This solution is based on a recommendation here: +// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454 + +namespace flags_internal { +absl::Mutex* GetGlobalConstructionGuard(); +} // namespace flags_internal + +// Public methods of `absl::Flag` are NOT part of the Abseil Flags API. +// See https://abseil.io/docs/cpp/guides/flags +template +class Flag { + public: + // No constructor and destructor to ensure this is an aggregate type. 
+ // Visual Studio 2015 still requires the constructor for class to be + // constexpr initializable. +#if _MSC_VER <= 1900 + constexpr Flag(const char* name, const char* filename, + const flags_internal::HelpGenFunc help_gen, + const flags_internal::FlagDfltGenFunc default_value_gen) + : name_(name), + filename_(filename), + help_gen_(help_gen), + default_value_gen_(default_value_gen), + inited_(false), + impl_(nullptr) {} +#endif + + flags_internal::Flag& GetImpl() const { + if (!inited_.load(std::memory_order_acquire)) { + absl::MutexLock l(flags_internal::GetGlobalConstructionGuard()); + + if (inited_.load(std::memory_order_acquire)) { + return *impl_; + } + + impl_ = new flags_internal::Flag( + name_, filename_, + {flags_internal::FlagHelpMsg(help_gen_), + flags_internal::FlagHelpKind::kGenFunc}, + {flags_internal::FlagDefaultSrc(default_value_gen_), + flags_internal::FlagDefaultKind::kGenFunc}); + inited_.store(true, std::memory_order_release); + } + + return *impl_; + } + + // Public methods of `absl::Flag` are NOT part of the Abseil Flags API. 
+ // See https://abseil.io/docs/cpp/guides/flags + bool IsRetired() const { return GetImpl().IsRetired(); } + absl::string_view Name() const { return GetImpl().Name(); } + std::string Help() const { return GetImpl().Help(); } + bool IsModified() const { return GetImpl().IsModified(); } + bool IsSpecifiedOnCommandLine() const { + return GetImpl().IsSpecifiedOnCommandLine(); + } + std::string Filename() const { return GetImpl().Filename(); } + std::string DefaultValue() const { return GetImpl().DefaultValue(); } + std::string CurrentValue() const { return GetImpl().CurrentValue(); } + template + inline bool IsOfType() const { + return GetImpl().template IsOfType(); + } + T Get() const { + return flags_internal::FlagImplPeer::InvokeGet(GetImpl()); + } + void Set(const T& v) { + flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v); + } + void InvokeCallback() { GetImpl().InvokeCallback(); } + + const CommandLineFlag& Reflect() const { + return flags_internal::FlagImplPeer::InvokeReflect(GetImpl()); + } + + // The data members are logically private, but they need to be public for + // this to be an aggregate type. + const char* name_; + const char* filename_; + const flags_internal::HelpGenFunc help_gen_; + const flags_internal::FlagDfltGenFunc default_value_gen_; + + mutable std::atomic inited_; + mutable flags_internal::Flag* impl_; +}; diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/parse.h b/CAPI/cpp/grpc/include/absl/flags/internal/parse.h new file mode 100644 index 00000000..c9833327 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/parse.h @@ -0,0 +1,76 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_PARSE_H_ +#define ABSL_FLAGS_INTERNAL_PARSE_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/flags/declare.h" +#include "absl/flags/internal/usage.h" +#include "absl/strings/string_view.h" + +ABSL_DECLARE_FLAG(std::vector, flagfile); +ABSL_DECLARE_FLAG(std::vector, fromenv); +ABSL_DECLARE_FLAG(std::vector, tryfromenv); +ABSL_DECLARE_FLAG(std::vector, undefok); + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + enum class UsageFlagsAction + { + kHandleUsage, + kIgnoreUsage + }; + enum class OnUndefinedFlag + { + kIgnoreUndefined, + kReportUndefined, + kAbortIfUndefined + }; + + // This is not a public interface. This interface exists to expose the ability + // to change help output stream in case of parsing errors. This is used by + // internal unit tests to validate expected outputs. + // When this was written, `EXPECT_EXIT` only supported matchers on stderr, + // but not on stdout. + std::vector ParseCommandLineImpl( + int argc, char* argv[], UsageFlagsAction usage_flag_action, OnUndefinedFlag undef_flag_action, std::ostream& error_help_output = std::cout + ); + + // -------------------------------------------------------------------- + // Inspect original command line + + // Returns true if flag with specified name was either present on the original + // command line or specified in flag file present on the original command line. 
+ bool WasPresentOnCommandLine(absl::string_view flag_name); + + // Return existing flags similar to the parameter, in order to help in case of + // misspellings. + std::vector GetMisspellingHints(absl::string_view flag); + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PARSE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h b/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h new file mode 100644 index 00000000..c80eb59e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h @@ -0,0 +1,62 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_PATH_UTIL_H_ +#define ABSL_FLAGS_INTERNAL_PATH_UTIL_H_ + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // A portable interface that returns the basename of the filename passed as an + // argument. It is similar to basename(3) + // . + // For example: + // flags_internal::Basename("a/b/prog/file.cc") + // returns "file.cc" + // flags_internal::Basename("file.cc") + // returns "file.cc" + inline absl::string_view Basename(absl::string_view filename) + { + auto last_slash_pos = filename.find_last_of("/\\"); + + return last_slash_pos == absl::string_view::npos ? 
filename : filename.substr(last_slash_pos + 1); + } + + // A portable interface that returns the directory name of the filename + // passed as an argument, including the trailing slash. + // Returns the empty string if a slash is not found in the input file name. + // For example: + // flags_internal::Package("a/b/prog/file.cc") + // returns "a/b/prog/" + // flags_internal::Package("file.cc") + // returns "" + inline absl::string_view Package(absl::string_view filename) + { + auto last_slash_pos = filename.find_last_of("/\\"); + + return last_slash_pos == absl::string_view::npos ? absl::string_view() : filename.substr(0, last_slash_pos + 1); + } + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PATH_UTIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h b/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h new file mode 100644 index 00000000..dcbd4c16 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h @@ -0,0 +1,61 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_FLAGS_INTERNAL_PRIVATE_HANDLE_ACCESSOR_H_ +#define ABSL_FLAGS_INTERNAL_PRIVATE_HANDLE_ACCESSOR_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/internal/commandlineflag.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // This class serves as a trampoline to access private methods of + // CommandLineFlag. This class is intended for use exclusively internally inside + // of the Abseil Flags implementation. + class PrivateHandleAccessor + { + public: + // Access to CommandLineFlag::TypeId. + static FlagFastTypeId TypeId(const CommandLineFlag& flag); + + // Access to CommandLineFlag::SaveState. + static std::unique_ptr SaveState(CommandLineFlag& flag); + + // Access to CommandLineFlag::IsSpecifiedOnCommandLine. + static bool IsSpecifiedOnCommandLine(const CommandLineFlag& flag); + + // Access to CommandLineFlag::ValidateInputValue. + static bool ValidateInputValue(const CommandLineFlag& flag, absl::string_view value); + + // Access to CommandLineFlag::CheckDefaultValueParsingRoundtrip. + static void CheckDefaultValueParsingRoundtrip(const CommandLineFlag& flag); + + static bool ParseFrom(CommandLineFlag& flag, absl::string_view value, flags_internal::FlagSettingMode set_mode, flags_internal::ValueSource source, std::string& error); + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PRIVATE_HANDLE_ACCESSOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h b/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h new file mode 100644 index 00000000..6e983d1e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h @@ -0,0 +1,52 @@ +// +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_ +#define ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_ + +#include + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Program name + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // Returns program invocation name or "UNKNOWN" if `SetProgramInvocationName()` + // is never called. At the moment this is always set to argv[0] as part of + // library initialization. + std::string ProgramInvocationName(); + + // Returns base name for program invocation name. For example, if + // ProgramInvocationName() == "a/b/mybinary" + // then + // ShortProgramInvocationName() == "mybinary" + std::string ShortProgramInvocationName(); + + // Sets program invocation name to a new value. Should only be called once + // during program initialization, before any threads are spawned. + void SetProgramInvocationName(absl::string_view prog_name_str); + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/registry.h b/CAPI/cpp/grpc/include/absl/flags/internal/registry.h new file mode 100644 index 00000000..72d9f743 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/registry.h @@ -0,0 +1,101 @@ +// +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_REGISTRY_H_ +#define ABSL_FLAGS_INTERNAL_REGISTRY_H_ + +#include + +#include "absl/base/config.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/internal/commandlineflag.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Global flags registry API. + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // Executes specified visitor for each non-retired flag in the registry. While + // callback are executed, the registry is locked and can't be changed. + void ForEachFlag(std::function visitor); + + //----------------------------------------------------------------------------- + + bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename); + + void FinalizeRegistry(); + + //----------------------------------------------------------------------------- + // Retired registrations: + // + // Retired flag registrations are treated specially. A 'retired' flag is + // provided only for compatibility with automated invocations that still + // name it. A 'retired' flag: + // - is not bound to a C++ FLAGS_ reference. + // - has a type and a value, but that value is intentionally inaccessible. + // - does not appear in --help messages. + // - is fully supported by _all_ flag parsing routines. 
+ // - consumes args normally, and complains about type mismatches in its + // argument. + // - emits a complaint but does not die (e.g. LOG(ERROR)) if it is + // accessed by name through the flags API for parsing or otherwise. + // + // The registrations for a flag happen in an unspecified order as the + // initializers for the namespace-scope objects of a program are run. + // Any number of weak registrations for a flag can weakly define the flag. + // One non-weak registration will upgrade the flag from weak to non-weak. + // Further weak registrations of a non-weak flag are ignored. + // + // This mechanism is designed to support moving dead flags into a + // 'graveyard' library. An example migration: + // + // 0: Remove references to this FLAGS_flagname in the C++ codebase. + // 1: Register as 'retired' in old_lib. + // 2: Make old_lib depend on graveyard. + // 3: Add a redundant 'retired' registration to graveyard. + // 4: Remove the old_lib 'retired' registration. + // 5: Eventually delete the graveyard registration entirely. + // + + // Retire flag with name "name" and type indicated by ops. + void Retire(const char* name, FlagFastTypeId type_id, char* buf); + + constexpr size_t kRetiredFlagObjSize = 3 * sizeof(void*); + constexpr size_t kRetiredFlagObjAlignment = alignof(void*); + + // Registered a retired flag with name 'flag_name' and type 'T'. 
+ template + class RetiredFlag + { + public: + void Retire(const char* flag_name) + { + flags_internal::Retire(flag_name, base_internal::FastTypeId(), buf_); + } + + private: + alignas(kRetiredFlagObjAlignment) char buf_[kRetiredFlagObjSize]; + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_REGISTRY_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h b/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h new file mode 100644 index 00000000..893258c6 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h @@ -0,0 +1,204 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ +#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ + +#include +#include + +#include +#include +#include + +#include "absl/base/optimization.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // Align 'x' up to the nearest 'align' bytes. + inline constexpr size_t AlignUp(size_t x, size_t align) + { + return align * ((x + align - 1) / align); + } + + // A SequenceLock implements lock-free reads. A sequence counter is incremented + // before and after each write, and readers access the counter before and after + // accessing the protected data. 
If the counter is verified to not change during + // the access, and the sequence counter value was even, then the reader knows + // that the read was race-free and valid. Otherwise, the reader must fall back + // to a Mutex-based code path. + // + // This particular SequenceLock starts in an "uninitialized" state in which + // TryRead() returns false. It must be enabled by calling MarkInitialized(). + // This serves as a marker that the associated flag value has not yet been + // initialized and a slow path needs to be taken. + // + // The memory reads and writes protected by this lock must use the provided + // `TryRead()` and `Write()` functions. These functions behave similarly to + // `memcpy()`, with one oddity: the protected data must be an array of + // `std::atomic`. This is to comply with the C++ standard, which + // considers data races on non-atomic objects to be undefined behavior. See "Can + // Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J. + // Boehm for more details. + // + // [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf + class SequenceLock + { + public: + constexpr SequenceLock() : + lock_(kUninitialized) + { + } + + // Mark that this lock is ready for use. + void MarkInitialized() + { + assert(lock_.load(std::memory_order_relaxed) == kUninitialized); + lock_.store(0, std::memory_order_release); + } + + // Copy "size" bytes of data from "src" to "dst", protected as a read-side + // critical section of the sequence lock. + // + // Unlike traditional sequence lock implementations which loop until getting a + // clean read, this implementation returns false in the case of concurrent + // calls to `Write`. In such a case, the caller should fall back to a + // locking-based slow path. + // + // Returns false if the sequence lock was not yet marked as initialized. + // + // NOTE: If this returns false, "dst" may be overwritten with undefined + // (potentially uninitialized) data. 
+ bool TryRead(void* dst, const std::atomic* src, size_t size) const + { + // Acquire barrier ensures that no loads done by f() are reordered + // above the first load of the sequence counter. + int64_t seq_before = lock_.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) + return false; + RelaxedCopyFromAtomic(dst, src, size); + // Another acquire fence ensures that the load of 'lock_' below is + // strictly ordered after the RelaxedCopyToAtomic call above. + std::atomic_thread_fence(std::memory_order_acquire); + int64_t seq_after = lock_.load(std::memory_order_relaxed); + return ABSL_PREDICT_TRUE(seq_before == seq_after); + } + + // Copy "size" bytes from "src" to "dst" as a write-side critical section + // of the sequence lock. Any concurrent readers will be forced to retry + // until they get a read that does not conflict with this write. + // + // This call must be externally synchronized against other calls to Write, + // but may proceed concurrently with reads. + void Write(std::atomic* dst, const void* src, size_t size) + { + // We can use relaxed instructions to increment the counter since we + // are extenally synchronized. The std::atomic_thread_fence below + // ensures that the counter updates don't get interleaved with the + // copy to the data. + int64_t orig_seq = lock_.load(std::memory_order_relaxed); + assert((orig_seq & 1) == 0); // Must be initially unlocked. + lock_.store(orig_seq + 1, std::memory_order_relaxed); + + // We put a release fence between update to lock_ and writes to shared data. + // Thus all stores to shared data are effectively release operations and + // update to lock_ above cannot be re-ordered past any of them. Note that + // this barrier is not for the fetch_add above. A release barrier for the + // fetch_add would be before it, not after. 
+ std::atomic_thread_fence(std::memory_order_release); + RelaxedCopyToAtomic(dst, src, size); + // "Release" semantics ensure that none of the writes done by + // RelaxedCopyToAtomic() can be reordered after the following modification. + lock_.store(orig_seq + 2, std::memory_order_release); + } + + // Return the number of times that Write() has been called. + // + // REQUIRES: This must be externally synchronized against concurrent calls to + // `Write()` or `IncrementModificationCount()`. + // REQUIRES: `MarkInitialized()` must have been previously called. + int64_t ModificationCount() const + { + int64_t val = lock_.load(std::memory_order_relaxed); + assert(val != kUninitialized && (val & 1) == 0); + return val / 2; + } + + // REQUIRES: This must be externally synchronized against concurrent calls to + // `Write()` or `ModificationCount()`. + // REQUIRES: `MarkInitialized()` must have been previously called. + void IncrementModificationCount() + { + int64_t val = lock_.load(std::memory_order_relaxed); + assert(val != kUninitialized); + lock_.store(val + 2, std::memory_order_relaxed); + } + + private: + // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed + // atomics. + static void RelaxedCopyFromAtomic(void* dst, const std::atomic* src, size_t size) + { + char* dst_byte = static_cast(dst); + while (size >= sizeof(uint64_t)) + { + uint64_t word = src->load(std::memory_order_relaxed); + std::memcpy(dst_byte, &word, sizeof(word)); + dst_byte += sizeof(word); + src++; + size -= sizeof(word); + } + if (size > 0) + { + uint64_t word = src->load(std::memory_order_relaxed); + std::memcpy(dst_byte, &word, size); + } + } + + // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed + // atomics. 
+ static void RelaxedCopyToAtomic(std::atomic* dst, const void* src, size_t size) + { + const char* src_byte = static_cast(src); + while (size >= sizeof(uint64_t)) + { + uint64_t word; + std::memcpy(&word, src_byte, sizeof(word)); + dst->store(word, std::memory_order_relaxed); + src_byte += sizeof(word); + dst++; + size -= sizeof(word); + } + if (size > 0) + { + uint64_t word = 0; + std::memcpy(&word, src_byte, size); + dst->store(word, std::memory_order_relaxed); + } + } + + static constexpr int64_t kUninitialized = -1; + std::atomic lock_; + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/usage.h b/CAPI/cpp/grpc/include/absl/flags/internal/usage.h new file mode 100644 index 00000000..9141014d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/usage.h @@ -0,0 +1,107 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_USAGE_H_ +#define ABSL_FLAGS_INTERNAL_USAGE_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/flags/commandlineflag.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Usage reporting interfaces + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // The format to report the help messages in. 
+ enum class HelpFormat + { + kHumanReadable, + }; + + // The kind of usage help requested. + enum class HelpMode + { + kNone, + kImportant, + kShort, + kFull, + kPackage, + kMatch, + kVersion, + kOnlyCheckArgs + }; + + // Streams the help message describing `flag` to `out`. + // The default value for `flag` is included in the output. + void FlagHelp(std::ostream& out, const CommandLineFlag& flag, HelpFormat format = HelpFormat::kHumanReadable); + + // Produces the help messages for all flags matching the filter. A flag matches + // the filter if it is defined in a file with a filename which includes + // filter string as a substring. You can use '/' and '.' to restrict the + // matching to a specific file names. For example: + // FlagsHelp(out, "/path/to/file."); + // restricts help to only flags which resides in files named like: + // .../path/to/file. + // for any extension 'ext'. If the filter is empty this function produces help + // messages for all flags. + void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format, absl::string_view program_usage_message); + + // -------------------------------------------------------------------- + + // If any of the 'usage' related command line flags (listed on the bottom of + // this file) has been set this routine produces corresponding help message in + // the specified output stream and returns HelpMode that was handled. Otherwise + // it returns HelpMode::kNone. + HelpMode HandleUsageFlags(std::ostream& out, absl::string_view program_usage_message); + + // -------------------------------------------------------------------- + // Encapsulates the logic of exiting the binary depending on handled help mode. 
+ + void MaybeExit(HelpMode mode); + + // -------------------------------------------------------------------- + // Globals representing usage reporting flags + + // Returns substring to filter help output (--help=substr argument) + std::string GetFlagsHelpMatchSubstr(); + // Returns the requested help mode. + HelpMode GetFlagsHelpMode(); + // Returns the requested help format. + HelpFormat GetFlagsHelpFormat(); + + // These are corresponding setters to the attributes above. + void SetFlagsHelpMatchSubstr(absl::string_view); + void SetFlagsHelpMode(HelpMode); + void SetFlagsHelpFormat(HelpFormat); + + // Deduces usage flags from the input argument in a form --name=value or + // --name. argument is already split into name and value before we call this + // function. + bool DeduceUsageFlags(absl::string_view name, absl::string_view value); + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_USAGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/marshalling.h b/CAPI/cpp/grpc/include/absl/flags/marshalling.h new file mode 100644 index 00000000..7c5370d1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/marshalling.h @@ -0,0 +1,373 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: marshalling.h +// ----------------------------------------------------------------------------- +// +// This header file defines the API for extending Abseil flag support to +// custom types, and defines the set of overloads for fundamental types. +// +// Out of the box, the Abseil flags library supports the following types: +// +// * `bool` +// * `int16_t` +// * `uint16_t` +// * `int32_t` +// * `uint32_t` +// * `int64_t` +// * `uint64_t` +// * `float` +// * `double` +// * `std::string` +// * `std::vector` +// * `std::optional` +// * `absl::LogSeverity` (provided natively for layering reasons) +// +// Note that support for integral types is implemented using overloads for +// variable-width fundamental types (`short`, `int`, `long`, etc.). However, +// you should prefer the fixed-width integral types (`int32_t`, `uint64_t`, +// etc.) we've noted above within flag definitions. +// +// In addition, several Abseil libraries provide their own custom support for +// Abseil flags. Documentation for these formats is provided in the type's +// `AbslParseFlag()` definition. +// +// The Abseil time library provides the following support for civil time values: +// +// * `absl::CivilSecond` +// * `absl::CivilMinute` +// * `absl::CivilHour` +// * `absl::CivilDay` +// * `absl::CivilMonth` +// * `absl::CivilYear` +// +// and also provides support for the following absolute time values: +// +// * `absl::Duration` +// * `absl::Time` +// +// Additional support for Abseil types will be noted here as it is added. +// +// You can also provide your own custom flags by adding overloads for +// `AbslParseFlag()` and `AbslUnparseFlag()` to your type definitions. (See +// below.) 
+// +// ----------------------------------------------------------------------------- +// Optional Flags +// ----------------------------------------------------------------------------- +// +// The Abseil flags library supports flags of type `std::optional` where +// `T` is a type of one of the supported flags. We refer to this flag type as +// an "optional flag." An optional flag is either "valueless", holding no value +// of type `T` (indicating that the flag has not been set) or a value of type +// `T`. The valueless state in C++ code is represented by a value of +// `std::nullopt` for the optional flag. +// +// Using `std::nullopt` as an optional flag's default value allows you to check +// whether such a flag was ever specified on the command line: +// +// if (absl::GetFlag(FLAGS_foo).has_value()) { +// // flag was set on command line +// } else { +// // flag was not passed on command line +// } +// +// Using an optional flag in this manner avoids common workarounds for +// indicating such an unset flag (such as using sentinel values to indicate this +// state). +// +// An optional flag also allows a developer to pass a flag in an "unset" +// valueless state on the command line, allowing the flag to later be set in +// binary logic. An optional flag's valueless state is indicated by the special +// notation of passing the value as an empty string through the syntax `--flag=` +// or `--flag ""`. +// +// $ binary_with_optional --flag_in_unset_state= +// $ binary_with_optional --flag_in_unset_state "" +// +// Note: as a result of the above syntax requirements, an optional flag cannot +// be set to a `T` of any value which unparses to the empty string. 
+// +// ----------------------------------------------------------------------------- +// Adding Type Support for Abseil Flags +// ----------------------------------------------------------------------------- +// +// To add support for your user-defined type, add overloads of `AbslParseFlag()` +// and `AbslUnparseFlag()` as free (non-member) functions to your type. If `T` +// is a class type, these functions can be friend function definitions. These +// overloads must be added to the same namespace where the type is defined, so +// that they can be discovered by Argument-Dependent Lookup (ADL). +// +// Example: +// +// namespace foo { +// +// enum OutputMode { kPlainText, kHtml }; +// +// // AbslParseFlag converts from a string to OutputMode. +// // Must be in same namespace as OutputMode. +// +// // Parses an OutputMode from the command line flag value `text`. Returns +// // `true` and sets `*mode` on success; returns `false` and sets `*error` +// // on failure. +// bool AbslParseFlag(absl::string_view text, +// OutputMode* mode, +// std::string* error) { +// if (text == "plaintext") { +// *mode = kPlainText; +// return true; +// } +// if (text == "html") { +// *mode = kHtml; +// return true; +// } +// *error = "unknown value for enumeration"; +// return false; +// } +// +// // AbslUnparseFlag converts from an OutputMode to a string. +// // Must be in same namespace as OutputMode. +// +// // Returns a textual flag value corresponding to the OutputMode `mode`. +// std::string AbslUnparseFlag(OutputMode mode) { +// switch (mode) { +// case kPlainText: return "plaintext"; +// case kHtml: return "html"; +// } +// return absl::StrCat(mode); +// } +// +// Notice that neither `AbslParseFlag()` nor `AbslUnparseFlag()` are class +// members, but free functions. `AbslParseFlag/AbslUnparseFlag()` overloads +// for a type should only be declared in the same file and namespace as said +// type. 
The proper `AbslParseFlag/AbslUnparseFlag()` implementations for a +// given type will be discovered via Argument-Dependent Lookup (ADL). +// +// `AbslParseFlag()` may need, in turn, to parse simpler constituent types +// using `absl::ParseFlag()`. For example, a custom struct `MyFlagType` +// consisting of a `std::pair` would add an `AbslParseFlag()` +// overload for its `MyFlagType` like so: +// +// Example: +// +// namespace my_flag_type { +// +// struct MyFlagType { +// std::pair my_flag_data; +// }; +// +// bool AbslParseFlag(absl::string_view text, MyFlagType* flag, +// std::string* err); +// +// std::string AbslUnparseFlag(const MyFlagType&); +// +// // Within the implementation, `AbslParseFlag()` will, in turn invoke +// // `absl::ParseFlag()` on its constituent `int` and `std::string` types +// // (which have built-in Abseil flag support). +// +// bool AbslParseFlag(absl::string_view text, MyFlagType* flag, +// std::string* err) { +// std::pair tokens = +// absl::StrSplit(text, ','); +// if (!absl::ParseFlag(tokens.first, &flag->my_flag_data.first, err)) +// return false; +// if (!absl::ParseFlag(tokens.second, &flag->my_flag_data.second, err)) +// return false; +// return true; +// } +// +// // Similarly, for unparsing, we can simply invoke `absl::UnparseFlag()` on +// // the constituent types. 
+// std::string AbslUnparseFlag(const MyFlagType& flag) { +// return absl::StrCat(absl::UnparseFlag(flag.my_flag_data.first), +// ",", +// absl::UnparseFlag(flag.my_flag_data.second)); +// } +#ifndef ABSL_FLAGS_MARSHALLING_H_ +#define ABSL_FLAGS_MARSHALLING_H_ + +#include "absl/base/config.h" +#include "absl/numeric/int128.h" + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +#include +#endif +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Forward declaration to be used inside composable flag parse/unparse + // implementations + template + inline bool ParseFlag(absl::string_view input, T* dst, std::string* error); + template + inline std::string UnparseFlag(const T& v); + + namespace flags_internal + { + + // Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types. + bool AbslParseFlag(absl::string_view, bool*, std::string*); + bool AbslParseFlag(absl::string_view, short*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned short*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, int*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned int*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, long*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned long*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, long long*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned long long*, // NOLINT + std::string*); + bool AbslParseFlag(absl::string_view, absl::int128*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, absl::uint128*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, float*, std::string*); + bool AbslParseFlag(absl::string_view, double*, std::string*); + bool AbslParseFlag(absl::string_view, std::string*, std::string*); + bool 
AbslParseFlag(absl::string_view, std::vector*, std::string*); + + template + bool AbslParseFlag(absl::string_view text, absl::optional* f, std::string* err) + { + if (text.empty()) + { + *f = absl::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) + return false; + + *f = std::move(value); + return true; + } + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) + template + bool AbslParseFlag(absl::string_view text, std::optional* f, std::string* err) + { + if (text.empty()) + { + *f = std::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) + return false; + + *f = std::move(value); + return true; + } +#endif + + template + bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) + { + // Comment on next line provides a good compiler error message if T + // does not have AbslParseFlag(absl::string_view, T*, std::string*). + return AbslParseFlag(input, dst, err); // Is T missing AbslParseFlag? + } + + // Strings and std:: containers do not have the same overload resolution + // considerations as fundamental types. Naming these 'AbslUnparseFlag' means we + // can avoid the need for additional specializations of Unparse (below). + std::string AbslUnparseFlag(absl::string_view v); + std::string AbslUnparseFlag(const std::vector&); + + template + std::string AbslUnparseFlag(const absl::optional& f) + { + return f.has_value() ? absl::UnparseFlag(*f) : ""; + } + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) + template + std::string AbslUnparseFlag(const std::optional& f) + { + return f.has_value() ? absl::UnparseFlag(*f) : ""; + } +#endif + + template + std::string Unparse(const T& v) + { + // Comment on next line provides a good compiler error message if T does not + // have UnparseFlag. + return AbslUnparseFlag(v); // Is T missing AbslUnparseFlag? + } + + // Overloads for builtin types. 
+ std::string Unparse(bool v); + std::string Unparse(short v); // NOLINT + std::string Unparse(unsigned short v); // NOLINT + std::string Unparse(int v); // NOLINT + std::string Unparse(unsigned int v); // NOLINT + std::string Unparse(long v); // NOLINT + std::string Unparse(unsigned long v); // NOLINT + std::string Unparse(long long v); // NOLINT + std::string Unparse(unsigned long long v); // NOLINT + std::string Unparse(absl::int128 v); + std::string Unparse(absl::uint128 v); + std::string Unparse(float v); + std::string Unparse(double v); + + } // namespace flags_internal + + // ParseFlag() + // + // Parses a string value into a flag value of type `T`. Do not add overloads of + // this function for your type directly; instead, add an `AbslParseFlag()` + // free function as documented above. + // + // Some implementations of `AbslParseFlag()` for types which consist of other, + // constituent types which already have Abseil flag support, may need to call + // `absl::ParseFlag()` on those consituent string values. (See above.) + template + inline bool ParseFlag(absl::string_view input, T* dst, std::string* error) + { + return flags_internal::InvokeParseFlag(input, dst, error); + } + + // UnparseFlag() + // + // Unparses a flag value of type `T` into a string value. Do not add overloads + // of this function for your type directly; instead, add an `AbslUnparseFlag()` + // free function as documented above. + // + // Some implementations of `AbslUnparseFlag()` for types which consist of other, + // constituent types which already have Abseil flag support, may want to call + // `absl::UnparseFlag()` on those constituent types. (See above.) + template + inline std::string UnparseFlag(const T& v) + { + return flags_internal::Unparse(v); + } + + // Overloads for `absl::LogSeverity` can't (easily) appear alongside that type's + // definition because it is layered below flags. See proper documentation in + // base/log_severity.h. 
+ enum class LogSeverity : int; + bool AbslParseFlag(absl::string_view, absl::LogSeverity*, std::string*); + std::string AbslUnparseFlag(absl::LogSeverity); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_MARSHALLING_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/parse.h b/CAPI/cpp/grpc/include/absl/flags/parse.h new file mode 100644 index 00000000..e4e312a2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/parse.h @@ -0,0 +1,138 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: parse.h +// ----------------------------------------------------------------------------- +// +// This file defines the main parsing function for Abseil flags: +// `absl::ParseCommandLine()`. + +#ifndef ABSL_FLAGS_PARSE_H_ +#define ABSL_FLAGS_PARSE_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/flags/internal/parse.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // This type represent information about an unrecognized flag in the command + // line. + struct UnrecognizedFlag + { + enum Source + { + kFromArgv, + kFromFlagfile + }; + + explicit UnrecognizedFlag(Source s, absl::string_view f) : + source(s), + flag_name(f) + { + } + // This field indicates where we found this flag: on the original command line + // or read in some flag file. 
+ Source source; + // Name of the flag we did not recognize in --flag_name=value or --flag_name. + std::string flag_name; + }; + + inline bool operator==(const UnrecognizedFlag& lhs, const UnrecognizedFlag& rhs) + { + return lhs.source == rhs.source && lhs.flag_name == rhs.flag_name; + } + + namespace flags_internal + { + + HelpMode ParseAbseilFlagsOnlyImpl( + int argc, char* argv[], std::vector& positional_args, std::vector& unrecognized_flags, UsageFlagsAction usage_flag_action + ); + + } // namespace flags_internal + + // ParseAbseilFlagsOnly() + // + // Parses a list of command-line arguments, passed in the `argc` and `argv[]` + // parameters, into a set of Abseil Flag values, returning any unparsed + // arguments in `positional_args` and `unrecognized_flags` output parameters. + // + // This function classifies all the arguments (including content of the + // flagfiles, if any) into one of the following groups: + // + // * arguments specified as "--flag=value" or "--flag value" that match + // registered or built-in Abseil Flags. These are "Abseil Flag arguments." + // * arguments specified as "--flag" that are unrecognized as Abseil Flags + // * arguments that are not specified as "--flag" are positional arguments + // * arguments that follow the flag-terminating delimiter (`--`) are also + // treated as positional arguments regardless of their syntax. + // + // All of the deduced Abseil Flag arguments are then parsed into their + // corresponding flag values. If any syntax errors are found in these arguments, + // the binary exits with code 1. + // + // This function also handles Abseil Flags built-in usage flags (e.g. --help) + // if any were present on the command line. + // + // All the remaining positional arguments including original program name + // (argv[0]) are are returned in the `positional_args` output parameter. + // + // All unrecognized flags that are not otherwise ignored are returned in the + // `unrecognized_flags` output parameter. 
Note that the special `undefok` + // flag allows you to specify flags which can be safely ignored; `undefok` + // specifies these flags as a comma-separated list. Any unrecognized flags + // that appear within `undefok` will therefore be ignored and not included in + // the `unrecognized_flag` output parameter. + // + void ParseAbseilFlagsOnly(int argc, char* argv[], std::vector& positional_args, std::vector& unrecognized_flags); + + // ReportUnrecognizedFlags() + // + // Reports an error to `stderr` for all non-ignored unrecognized flags in + // the provided `unrecognized_flags` list. + void ReportUnrecognizedFlags( + const std::vector& unrecognized_flags + ); + + // ParseCommandLine() + // + // First parses Abseil Flags only from the command line according to the + // description in `ParseAbseilFlagsOnly`. In addition this function handles + // unrecognized and usage flags. + // + // If any unrecognized flags are located they are reported using + // `ReportUnrecognizedFlags`. + // + // If any errors detected during command line parsing, this routine reports a + // usage message and aborts the program. + // + // If any built-in usage flags were specified on the command line (e.g. + // `--help`), this function reports help messages and then gracefully exits the + // program. + // + // This function returns all the remaining positional arguments collected by + // `ParseAbseilFlagsOnly`. + std::vector ParseCommandLine(int argc, char* argv[]); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_PARSE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/reflection.h b/CAPI/cpp/grpc/include/absl/flags/reflection.h new file mode 100644 index 00000000..8806abf1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/reflection.h @@ -0,0 +1,93 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: reflection.h +// ----------------------------------------------------------------------------- +// +// This file defines the routines to access and operate on an Abseil Flag's +// reflection handle. + +#ifndef ABSL_FLAGS_REFLECTION_H_ +#define ABSL_FLAGS_REFLECTION_H_ + +#include + +#include "absl/base/config.h" +#include "absl/container/flat_hash_map.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/internal/commandlineflag.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + class FlagSaverImpl; + } // namespace flags_internal + + // FindCommandLineFlag() + // + // Returns the reflection handle of an Abseil flag of the specified name, or + // `nullptr` if not found. This function will emit a warning if the name of a + // 'retired' flag is specified. + absl::CommandLineFlag* FindCommandLineFlag(absl::string_view name); + + // Returns current state of the Flags registry in a form of mapping from flag + // name to a flag reflection handle. + absl::flat_hash_map GetAllFlags(); + + //------------------------------------------------------------------------------ + // FlagSaver + //------------------------------------------------------------------------------ + // + // A FlagSaver object stores the state of flags in the scope where the FlagSaver + // is defined, allowing modification of those flags within that scope and + // automatic restoration of the flags to their previous state upon leaving the + // scope. 
+ // + // A FlagSaver can be used within tests to temporarily change the test + // environment and restore the test case to its previous state. + // + // Example: + // + // void MyFunc() { + // absl::FlagSaver fs; + // ... + // absl::SetFlag(&FLAGS_myFlag, otherValue); + // ... + // } // scope of FlagSaver left, flags return to previous state + // + // This class is thread-safe. + + class FlagSaver + { + public: + FlagSaver(); + ~FlagSaver(); + + FlagSaver(const FlagSaver&) = delete; + void operator=(const FlagSaver&) = delete; + + private: + flags_internal::FlagSaverImpl* impl_; + }; + + //----------------------------------------------------------------------------- + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_REFLECTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/usage.h b/CAPI/cpp/grpc/include/absl/flags/usage.h new file mode 100644 index 00000000..1838689a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/usage.h @@ -0,0 +1,44 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_USAGE_H_ +#define ABSL_FLAGS_USAGE_H_ + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Usage reporting interfaces + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Sets the "usage" message to be used by help reporting routines. 
+ // For example: + // absl::SetProgramUsageMessage( + // absl::StrCat("This program does nothing. Sample usage:\n", argv[0], + // " ")); + // Do not include commandline flags in the usage: we do that for you! + // Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit. + void SetProgramUsageMessage(absl::string_view new_usage_message); + + // Returns the usage message set by SetProgramUsageMessage(). + absl::string_view ProgramUsageMessage(); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_USAGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/usage_config.h b/CAPI/cpp/grpc/include/absl/flags/usage_config.h new file mode 100644 index 00000000..8f8439af --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/usage_config.h @@ -0,0 +1,140 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: usage_config.h +// ----------------------------------------------------------------------------- +// +// This file defines the main usage reporting configuration interfaces and +// documents Abseil's supported built-in usage flags. If these flags are found +// when parsing a command-line, Abseil will exit the program and display +// appropriate help messages. 
+#ifndef ABSL_FLAGS_USAGE_CONFIG_H_ +#define ABSL_FLAGS_USAGE_CONFIG_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +// ----------------------------------------------------------------------------- +// Built-in Usage Flags +// ----------------------------------------------------------------------------- +// +// Abseil supports the following built-in usage flags. When passed, these flags +// exit the program and : +// +// * --help +// Shows help on important flags for this binary +// * --helpfull +// Shows help on all flags +// * --helpshort +// Shows help on only the main module for this program +// * --helppackage +// Shows help on all modules in the main package +// * --version +// Shows the version and build info for this binary and exits +// * --only_check_args +// Exits after checking all flags +// * --helpon +// Shows help on the modules named by this flag value +// * --helpmatch +// Shows help on modules whose name contains the specified substring + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace flags_internal + { + using FlagKindFilter = std::function; + } // namespace flags_internal + + // FlagsUsageConfig + // + // This structure contains the collection of callbacks for changing the behavior + // of the usage reporting routines in Abseil Flags. + struct FlagsUsageConfig + { + // Returns true if flags defined in the given source code file should be + // reported with --helpshort flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_helpshort_flags("path/to/my/code.cc") returns true, invoking the + // program with --helpshort will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_helpshort_flags; + + // Returns true if flags defined in the filename should be reported with + // --help flag. 
For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_help_flags("path/to/my/code.cc") returns true, invoking the + // program with --help will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_help_flags; + + // Returns true if flags defined in the filename should be reported with + // --helppackage flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_helppackage_flags("path/to/my/code.cc") returns true, invoking the + // program with --helppackage will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_helppackage_flags; + + // Generates string containing program version. This is the string reported + // when user specifies --version in a command line. + std::function version_string; + + // Normalizes the filename specific to the build system/filesystem used. This + // routine is used when we report the information about the flag definition + // location. For instance, if your build resides at some location you do not + // want to expose in the usage output, you can trim it to show only relevant + // part. + // For example: + // normalize_filename("/my_company/some_long_path/src/project/file.cc") + // might produce + // "project/file.cc". + std::function normalize_filename; + }; + + // SetFlagsUsageConfig() + // + // Sets the usage reporting configuration callbacks. If any of the callbacks are + // not set in usage_config instance, then the default value of the callback is + // used. + void SetFlagsUsageConfig(FlagsUsageConfig usage_config); + + namespace flags_internal + { + + FlagsUsageConfig GetUsageConfig(); + + void ReportUsageError(absl::string_view msg, bool is_fatal); + + } // namespace flags_internal + ABSL_NAMESPACE_END +} // namespace absl + +extern "C" +{ + // Additional report of fatal usage error message before we std::exit. 
Error is + // fatal if is_fatal argument to ReportUsageError is true. + void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)( + absl::string_view + ); + +} // extern "C" + +#endif // ABSL_FLAGS_USAGE_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/any_invocable.h b/CAPI/cpp/grpc/include/absl/functional/any_invocable.h new file mode 100644 index 00000000..4f5303a6 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/any_invocable.h @@ -0,0 +1,338 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: any_invocable.h +// ----------------------------------------------------------------------------- +// +// This header file defines an `absl::AnyInvocable` type that assumes ownership +// and wraps an object of an invocable type. (Invocable types adhere to the +// concept specified in https://en.cppreference.com/w/cpp/concepts/invocable.) +// +// In general, prefer `absl::AnyInvocable` when you need a type-erased +// function parameter that needs to take ownership of the type. +// +// NOTE: `absl::AnyInvocable` is similar to the C++23 `std::move_only_function` +// abstraction, but has a slightly different API and is not designed to be a +// drop-in replacement or C++11-compatible backfill of that type. +// +// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original +// implementation. 
+ +#ifndef ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ +#define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/functional/internal/any_invocable.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::AnyInvocable + // + // `absl::AnyInvocable` is a functional wrapper type, like `std::function`, that + // assumes ownership of an invocable object. Unlike `std::function`, an + // `absl::AnyInvocable` is more type-safe and provides the following additional + // benefits: + // + // * Properly adheres to const correctness of the underlying type + // * Is move-only so avoids concurrency problems with copied invocables and + // unnecessary copies in general. + // * Supports reference qualifiers allowing it to perform unique actions (noted + // below). + // + // `absl::AnyInvocable` is a template, and an `absl::AnyInvocable` instantiation + // may wrap any invocable object with a compatible function signature, e.g. + // having arguments and return types convertible to types matching the + // `absl::AnyInvocable` signature, and also matching any stated reference + // qualifiers, as long as that type is moveable. It therefore provides broad + // type erasure for functional objects. + // + // An `absl::AnyInvocable` is typically used as a type-erased function parameter + // for accepting various functional objects: + // + // // Define a function taking an AnyInvocable parameter. + // void my_func(absl::AnyInvocable f) { + // ... + // }; + // + // // That function can accept any invocable type: + // + // // Accept a function reference. We don't need to move a reference. + // int func1() { return 0; }; + // my_func(func1); + // + // // Accept a lambda. We use std::move here because otherwise my_func would + // // copy the lambda. + // auto lambda = []() { return 0; }; + // my_func(std::move(lambda)); + // + // // Accept a function pointer. 
We don't need to move a function pointer. + // func2 = &func1; + // my_func(func2); + // + // // Accept an std::function by moving it. Note that the lambda is copyable + // // (satisfying std::function requirements) and moveable (satisfying + // // absl::AnyInvocable requirements). + // std::function func6 = []() { return 0; }; + // my_func(std::move(func6)); + // + // `AnyInvocable` also properly respects `const` qualifiers, reference + // qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as + // part of the user-specified function type (e.g. + // `AnyInvocable`). These qualifiers will be applied to + // the `AnyInvocable` object's `operator()`, and the underlying invocable must + // be compatible with those qualifiers. + // + // Comparison of const and non-const function types: + // + // // Store a closure inside of `func` with the function type `int()`. + // // Note that we have made `func` itself `const`. + // const AnyInvocable func = [](){ return 0; }; + // + // func(); // Compile-error: the passed type `int()` isn't `const`. + // + // // Store a closure inside of `const_func` with the function type + // // `int() const`. + // // Note that we have also made `const_func` itself `const`. + // const AnyInvocable const_func = [](){ return 0; }; + // + // const_func(); // Fine: `int() const` is `const`. + // + // In the above example, the call `func()` would have compiled if + // `std::function` were used even though the types are not const compatible. + // This is a bug, and using `absl::AnyInvocable` properly detects that bug. + // + // In addition to affecting the signature of `operator()`, the `const` and + // reference qualifiers of the function type also appropriately constrain which + // kinds of invocable objects you are allowed to place into the `AnyInvocable` + // instance. 
If you specify a function type that is const-qualified, then + // anything that you attempt to put into the `AnyInvocable` must be callable on + // a `const` instance of that type. + // + // Constraint example: + // + // // Fine because the lambda is callable when `const`. + // AnyInvocable func = [=](){ return 0; }; + // + // // This is a compile-error because the lambda isn't callable when `const`. + // AnyInvocable error = [=]() mutable { return 0; }; + // + // An `&&` qualifier can be used to express that an `absl::AnyInvocable` + // instance should be invoked at most once: + // + // // Invokes `continuation` with the logical result of an operation when + // // that operation completes (common in asynchronous code). + // void CallOnCompletion(AnyInvocable continuation) { + // int result_of_foo = foo(); + // + // // `std::move` is required because the `operator()` of `continuation` is + // // rvalue-reference qualified. + // std::move(continuation)(result_of_foo); + // } + // + // Attempting to call `absl::AnyInvocable` multiple times in such a case + // results in undefined behavior. + template + class AnyInvocable : private internal_any_invocable::Impl + { + private: + static_assert( + std::is_function::value, + "The template argument of AnyInvocable must be a function type." + ); + + using Impl = internal_any_invocable::Impl; + + public: + // The return type of Sig + using result_type = typename Impl::result_type; + + // Constructors + + // Constructs the `AnyInvocable` in an empty state. + AnyInvocable() noexcept = default; + AnyInvocable(std::nullptr_t) noexcept + { + } // NOLINT + + // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move. + // Note that `f` is not guaranteed to be empty after move-construction, + // although it may be. + AnyInvocable(AnyInvocable&& /*f*/) noexcept = default; + + // Constructs an `AnyInvocable` from an invocable object. 
+ // + // Upon construction, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. + template::value>> + AnyInvocable(F&& f) // NOLINT + : + Impl(internal_any_invocable::ConversionConstruct(), std::forward(f)) + { + } + + // Constructs an `AnyInvocable` that holds an invocable object of type `T`, + // which is constructed in-place from the given arguments. + // + // Example: + // + // AnyInvocable func( + // absl::in_place_type, arg1, arg2); + // + template::value>> + explicit AnyInvocable(absl::in_place_type_t, Args&&... args) : + Impl(absl::in_place_type>, std::forward(args)...) + { + static_assert(std::is_same>::value, "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Overload of the above constructor to support list-initialization. + template&, Args...>::value>> + explicit AnyInvocable(absl::in_place_type_t, std::initializer_list ilist, Args&&... args) : + Impl(absl::in_place_type>, ilist, std::forward(args)...) + { + static_assert(std::is_same>::value, "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Assignment Operators + + // Assigns an `AnyInvocable` through move-assignment. + // Note that `f` is not guaranteed to be empty after move-assignment + // although it may be. + AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default; + + // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If + // not empty, destroys the target, putting `*this` into an empty state. + AnyInvocable& operator=(std::nullptr_t) noexcept + { + this->Clear(); + return *this; + } + + // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance. + // + // Upon assignment, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. 
+ template::value>> + AnyInvocable& operator=(F&& f) + { + *this = AnyInvocable(std::forward(f)); + return *this; + } + + // Assigns an `AnyInvocable` from a reference to an invocable object. + // Upon assignment, stores a reference to the invocable object in the + // `AnyInvocable` instance. + template< + class F, + typename = absl::enable_if_t< + internal_any_invocable::CanAssignReferenceWrapper::value>> + AnyInvocable& operator=(std::reference_wrapper f) noexcept + { + *this = AnyInvocable(f); + return *this; + } + + // Destructor + + // If not empty, destroys the target. + ~AnyInvocable() = default; + + // absl::AnyInvocable::swap() + // + // Exchanges the targets of `*this` and `other`. + void swap(AnyInvocable& other) noexcept + { + std::swap(*this, other); + } + + // absl::AnyInvocable::operator bool() + // + // Returns `true` if `*this` is not empty. + // + // WARNING: An `AnyInvocable` that wraps an empty `std::function` is not + // itself empty. This behavior is consistent with the standard equivalent + // `std::move_only_function`. + // + // In other words: + // std::function f; // empty + // absl::AnyInvocable a = std::move(f); // not empty + explicit operator bool() const noexcept + { + return this->HasValue(); + } + + // Invokes the target object of `*this`. `*this` must not be empty. + // + // Note: The signature of this function call operator is the same as the + // template parameter `Sig`. + using Impl::operator(); + + // Equality operators + + // Returns `true` if `*this` is empty. + friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept + { + return !f.HasValue(); + } + + // Returns `true` if `*this` is empty. + friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept + { + return !f.HasValue(); + } + + // Returns `false` if `*this` is empty. + friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept + { + return f.HasValue(); + } + + // Returns `false` if `*this` is empty. 
+ friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept + { + return f.HasValue(); + } + + // swap() + // + // Exchanges the targets of `f1` and `f2`. + friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept + { + f1.swap(f2); + } + + private: + // Friending other instantiations is necessary for conversions. + template + friend class internal_any_invocable::CoreImpl; + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/bind_front.h b/CAPI/cpp/grpc/include/absl/functional/bind_front.h new file mode 100644 index 00000000..dfbd5170 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/bind_front.h @@ -0,0 +1,196 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: bind_front.h +// ----------------------------------------------------------------------------- +// +// `absl::bind_front()` returns a functor by binding a number of arguments to +// the front of a provided (usually more generic) functor. Unlike `std::bind`, +// it does not require the use of argument placeholders. The simpler syntax of +// `absl::bind_front()` allows you to avoid known misuses with `std::bind()`. 
+// +// `absl::bind_front()` is meant as a drop-in replacement for C++20's upcoming +// `std::bind_front()`, which similarly resolves these issues with +// `std::bind()`. Both `bind_front()` alternatives, unlike `std::bind()`, allow +// partial function application. (See +// https://en.wikipedia.org/wiki/Partial_application). + +#ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_ +#define ABSL_FUNCTIONAL_BIND_FRONT_H_ + +#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +#include // For std::bind_front. +#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L + +#include "absl/functional/internal/front_binder.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + +// bind_front() +// +// Binds the first N arguments of an invocable object and stores them by value. +// +// Like `std::bind()`, `absl::bind_front()` is implicitly convertible to +// `std::function`. In particular, it may be used as a simpler replacement for +// `std::bind()` in most cases, as it does not require placeholders to be +// specified. More importantly, it provides more reliable correctness guarantees +// than `std::bind()`; while `std::bind()` will silently ignore passing more +// parameters than expected, for example, `absl::bind_front()` will report such +// mis-uses as errors. In C++20, `absl::bind_front` is replaced by +// `std::bind_front`. +// +// absl::bind_front(a...) can be seen as storing the results of +// std::make_tuple(a...). +// +// Example: Binding a free function. +// +// int Minus(int a, int b) { return a - b; } +// +// assert(absl::bind_front(Minus)(3, 2) == 3 - 2); +// assert(absl::bind_front(Minus, 3)(2) == 3 - 2); +// assert(absl::bind_front(Minus, 3, 2)() == 3 - 2); +// +// Example: Binding a member function. +// +// struct Math { +// int Double(int a) const { return 2 * a; } +// }; +// +// Math math; +// +// assert(absl::bind_front(&Math::Double)(&math, 3) == 2 * 3); +// // Stores a pointer to math inside the functor. 
+// assert(absl::bind_front(&Math::Double, &math)(3) == 2 * 3); +// // Stores a copy of math inside the functor. +// assert(absl::bind_front(&Math::Double, math)(3) == 2 * 3); +// // Stores std::unique_ptr inside the functor. +// assert(absl::bind_front(&Math::Double, +// std::unique_ptr(new Math))(3) == 2 * 3); +// +// Example: Using `absl::bind_front()`, instead of `std::bind()`, with +// `std::function`. +// +// class FileReader { +// public: +// void ReadFileAsync(const std::string& filename, std::string* content, +// const std::function& done) { +// // Calls Executor::Schedule(std::function). +// Executor::DefaultExecutor()->Schedule( +// absl::bind_front(&FileReader::BlockingRead, this, +// filename, content, done)); +// } +// +// private: +// void BlockingRead(const std::string& filename, std::string* content, +// const std::function& done) { +// CHECK_OK(file::GetContents(filename, content, {})); +// done(); +// } +// }; +// +// `absl::bind_front()` stores bound arguments explicitly using the type passed +// rather than implicitly based on the type accepted by its functor. +// +// Example: Binding arguments explicitly. +// +// void LogStringView(absl::string_view sv) { +// LOG(INFO) << sv; +// } +// +// Executor* e = Executor::DefaultExecutor(); +// std::string s = "hello"; +// absl::string_view sv = s; +// +// // absl::bind_front(LogStringView, arg) makes a copy of arg and stores it. +// e->Schedule(absl::bind_front(LogStringView, sv)); // ERROR: dangling +// // string_view. +// +// e->Schedule(absl::bind_front(LogStringView, s)); // OK: stores a copy of +// // s. +// +// To store some of the arguments passed to `absl::bind_front()` by reference, +// use std::ref()` and `std::cref()`. +// +// Example: Storing some of the bound arguments by reference. +// +// class Service { +// public: +// void Serve(const Request& req, std::function* done) { +// // The request protocol buffer won't be deleted until done is called. 
+// // It's safe to store a reference to it inside the functor. +// Executor::DefaultExecutor()->Schedule( +// absl::bind_front(&Service::BlockingServe, this, std::cref(req), +// done)); +// } +// +// private: +// void BlockingServe(const Request& req, std::function* done); +// }; +// +// Example: Storing bound arguments by reference. +// +// void Print(const std::string& a, const std::string& b) { +// std::cerr << a << b; +// } +// +// std::string hi = "Hello, "; +// std::vector names = {"Chuk", "Gek"}; +// // Doesn't copy hi. +// for_each(names.begin(), names.end(), +// absl::bind_front(Print, std::ref(hi))); +// +// // DO NOT DO THIS: the functor may outlive "hi", resulting in +// // dangling references. +// foo->DoInFuture(absl::bind_front(Print, std::ref(hi), "Guest")); // BAD! +// auto f = absl::bind_front(Print, std::ref(hi), "Guest"); // BAD! +// +// Example: Storing reference-like types. +// +// void Print(absl::string_view a, const std::string& b) { +// std::cerr << a << b; +// } +// +// std::string hi = "Hello, "; +// // Copies "hi". +// absl::bind_front(Print, hi)("Chuk"); +// +// // Compile error: std::reference_wrapper is not implicitly +// // convertible to string_view. +// // absl::bind_front(Print, std::cref(hi))("Chuk"); +// +// // Doesn't copy "hi". +// absl::bind_front(Print, absl::string_view(hi))("Chuk"); +// +#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L + using std::bind_front; +#else // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L + template + constexpr functional_internal::bind_front_t bind_front( + F&& func, BoundArgs&&... args + ) + { + return functional_internal::bind_front_t( + absl::in_place, absl::forward(func), absl::forward(args)... 
+ ); + } +#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_BIND_FRONT_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/function_ref.h b/CAPI/cpp/grpc/include/absl/functional/function_ref.h new file mode 100644 index 00000000..69b9d529 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/function_ref.h @@ -0,0 +1,148 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: function_ref.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::FunctionRef` type for holding a +// non-owning reference to an object of any invocable type. This function +// reference is typically most useful as a type-erased argument type for +// accepting function types that neither take ownership nor copy the type; using +// the reference type in this case avoids a copy and an allocation. Best +// practices of other non-owning reference-like objects (such as +// `absl::string_view`) apply here. +// +// An `absl::FunctionRef` is similar in usage to a `std::function` but has the +// following differences: +// +// * It doesn't own the underlying object. +// * It doesn't have a null or empty state. +// * It never performs deep copies or allocations. 
+// * It's much faster and cheaper to construct. +// * It's trivially copyable and destructable. +// +// Generally, `absl::FunctionRef` should not be used as a return value, data +// member, or to initialize a `std::function`. Such usages will often lead to +// problematic lifetime issues. Once you convert something to an +// `absl::FunctionRef` you cannot make a deep copy later. +// +// This class is suitable for use wherever a "const std::function<>&" +// would be used without making a copy. ForEach functions and other versions of +// the visitor pattern are a good example of when this class should be used. +// +// This class is trivial to copy and should be passed by value. +#ifndef ABSL_FUNCTIONAL_FUNCTION_REF_H_ +#define ABSL_FUNCTIONAL_FUNCTION_REF_H_ + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/functional/internal/function_ref.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // FunctionRef + // + // Dummy class declaration to allow the partial specialization based on function + // types below. + template + class FunctionRef; + + // FunctionRef + // + // An `absl::FunctionRef` is a lightweight wrapper to any invocable object with + // a compatible signature. Generally, an `absl::FunctionRef` should only be used + // as an argument type and should be preferred as an argument over a const + // reference to a `std::function`. `absl::FunctionRef` itself does not allocate, + // although the wrapped invocable may. 
+ // + // Example: + // + // // The following function takes a function callback by const reference + // bool Visitor(const std::function& callback); + // + // // Assuming that the function is not stored or otherwise copied, it can be + // // replaced by an `absl::FunctionRef`: + // bool Visitor(absl::FunctionRef + // callback); + // + // Note: the assignment operator within an `absl::FunctionRef` is intentionally + // deleted to prevent misuse; because the `absl::FunctionRef` does not own the + // underlying type, assignment likely indicates misuse. + template + class FunctionRef + { + private: + // Used to disable constructors for objects that are not compatible with the + // signature of this FunctionRef. + template> + using EnableIfCompatible = + typename std::enable_if::value || std::is_convertible::value>::type; + + public: + // Constructs a FunctionRef from any invocable type. + template> + // NOLINTNEXTLINE(runtime/explicit) + FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) : + invoker_(&absl::functional_internal::InvokeObject) + { + absl::functional_internal::AssertNonNull(f); + ptr_.obj = &f; + } + + // Overload for function pointers. This eliminates a level of indirection that + // would happen if the above overload was used (it lets us store the pointer + // instead of a pointer to a pointer). + // + // This overload is also used for references to functions, since references to + // functions can decay to function pointers implicitly. + template< + typename F, + typename = EnableIfCompatible, + absl::functional_internal::EnableIf::value> = 0> + FunctionRef(F* f) // NOLINT(runtime/explicit) + : + invoker_(&absl::functional_internal::InvokeFunction) + { + assert(f != nullptr); + ptr_.fun = reinterpret_cast(f); + } + + // To help prevent subtle lifetime bugs, FunctionRef is not assignable. + // Typically, it should only be used as an argument type. 
+ FunctionRef& operator=(const FunctionRef& rhs) = delete; + FunctionRef(const FunctionRef& rhs) = default; + + // Call the underlying object. + R operator()(Args... args) const + { + return invoker_(ptr_, std::forward(args)...); + } + + private: + absl::functional_internal::VoidPtr ptr_; + absl::functional_internal::Invoker invoker_; + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_FUNCTION_REF_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h b/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h new file mode 100644 index 00000000..8f3cfbd4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h @@ -0,0 +1,940 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Implementation details for `absl::AnyInvocable` + +#ifndef ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ +#define ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ + +//////////////////////////////////////////////////////////////////////////////// +// // +// This implementation of the proposed `any_invocable` uses an approach that // +// chooses between local storage and remote storage for the contained target // +// object based on the target object's size, alignment requirements, and // +// whether or not it has a nothrow move constructor. Additional optimizations // +// are performed when the object is a trivially copyable type [basic.types]. 
// +// // +// There are three datamembers per `AnyInvocable` instance // +// // +// 1) A union containing either // +// - A pointer to the target object referred to via a void*, or // +// - the target object, emplaced into a raw char buffer // +// // +// 2) A function pointer to a "manager" function operation that takes a // +// discriminator and logically branches to either perform a move operation // +// or destroy operation based on that discriminator. // +// // +// 3) A function pointer to an "invoker" function operation that invokes the // +// target object, directly returning the result. // +// // +// When in the logically empty state, the manager function is an empty // +// function and the invoker function is one that would be undefined-behavior // +// to call. // +// // +// An additional optimization is performed when converting from one // +// AnyInvocable to another where only the noexcept specification and/or the // +// cv/ref qualifiers of the function type differ. In these cases, the // +// conversion works by "moving the guts", similar to if they were the same // +// exact type, as opposed to having to perform an additional layer of // +// wrapping through remote storage. // +// // +//////////////////////////////////////////////////////////////////////////////// + +// IWYU pragma: private, include "absl/functional/any_invocable.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/invoke.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + +// Helper macro used to prevent spelling `noexcept` in language versions older +// than C++17, where it is not part of the type system, in order to avoid +// compilation failures and internal compiler errors. 
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex) +#else +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) +#endif + + // Defined in functional/any_invocable.h + template + class AnyInvocable; + + namespace internal_any_invocable + { + + // Constants relating to the small-object-storage for AnyInvocable + enum StorageProperty : std::size_t + { + kAlignment = alignof(std::max_align_t), // The alignment of the storage + kStorageSize = sizeof(void*) * 2 // The size of the storage + }; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction for checking if a type is an AnyInvocable instantiation. + // This is used during conversion operations. + template + struct IsAnyInvocable : std::false_type + { + }; + + template + struct IsAnyInvocable> : std::true_type + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // A type trait that tells us whether or not a target function type should be + // stored locally in the small object optimization storage + template + using IsStoredLocally = std::integral_constant< + bool, + sizeof(T) <= kStorageSize && alignof(T) <= kAlignment && + kAlignment % alignof(T) == 0 && + std::is_nothrow_move_constructible::value>; + + // An implementation of std::remove_cvref_t of C++20. + template + using RemoveCVRef = + typename std::remove_cv::type>::type; + + //////////////////////////////////////////////////////////////////////////////// + // + // An implementation of the C++ standard INVOKE pseudo-macro, operation is + // equivalent to std::invoke except that it forces an implicit conversion to the + // specified return type. If "R" is void, the function is executed and the + // return value is simply ignored. + template::value>> + void InvokeR(F&& f, P&&... args) + { + absl::base_internal::invoke(std::forward(f), std::forward

(args)...); + } + + template::value, int> = 0> + ReturnType InvokeR(F&& f, P&&... args) + { + // GCC 12 has a false-positive -Wmaybe-uninitialized warning here. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return absl::base_internal::invoke(std::forward(f), std::forward

(args)...); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif + } + + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + /// + // A metafunction that takes a "T" corresponding to a parameter type of the + // user's specified function type, and yields the parameter type to use for the + // type-erased invoker. In order to prevent observable moves, this must be + // either a reference or, if the type is trivial, the original parameter type + // itself. Since the parameter type may be incomplete at the point that this + // metafunction is used, we can only do this optimization for scalar types + // rather than for any trivial type. + template + T ForwardImpl(std::true_type); + + template + T&& ForwardImpl(std::false_type); + + // NOTE: We deliberately use an intermediate struct instead of a direct alias, + // as a workaround for b/206991861 on MSVC versions < 1924. + template + struct ForwardedParameter + { + using type = decltype(( + ForwardImpl + )(std::integral_constant::value>())); + }; + + template + using ForwardedParameterType = typename ForwardedParameter::type; + // + //////////////////////////////////////////////////////////////////////////////// + + // A discriminator when calling the "manager" function that describes operation + // type-erased operation should be invoked. + // + // "relocate_from_to" specifies that the manager should perform a move. + // + // "dispose" specifies that the manager should perform a destroy. 
+ enum class FunctionToCall : bool + { + relocate_from_to, + dispose + }; + + // The portion of `AnyInvocable` state that contains either a pointer to the + // target object or the object itself in local storage + union TypeErasedState + { + struct + { + // A pointer to the type-erased object when remotely stored + void* target; + // The size of the object for `RemoteManagerTrivial` + std::size_t size; + } remote; + + // Local-storage for the type-erased object when small and trivial enough + alignas(kAlignment) char storage[kStorageSize]; + }; + + // A typed accessor for the object in `TypeErasedState` storage + template + T& ObjectInLocalStorage(TypeErasedState* const state) + { + // We launder here because the storage may be reused with the same type. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L + return *std::launder(reinterpret_cast(&state->storage)); +#elif ABSL_HAVE_BUILTIN(__builtin_launder) + return *__builtin_launder(reinterpret_cast(&state->storage)); +#else + + // When `std::launder` or equivalent are not available, we rely on undefined + // behavior, which works as intended on Abseil's officially supported + // platforms as of Q2 2022. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic push +#endif + return *reinterpret_cast(&state->storage); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + +#endif + } + + // The type for functions issuing lifetime-related operations: move and dispose + // A pointer to such a function is contained in each `AnyInvocable` instance. + // NOTE: When specifying `FunctionToCall::`dispose, the same state must be + // passed as both "from" and "to". 
+ using ManagerType = void(FunctionToCall /*operation*/, TypeErasedState* /*from*/, TypeErasedState* /*to*/) + ABSL_INTERNAL_NOEXCEPT_SPEC(true); + + // The type for functions issuing the actual invocation of the object + // A pointer to such a function is contained in each AnyInvocable instance. + template + using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType

...) + ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept); + + // The manager that is used when AnyInvocable is empty + inline void EmptyManager(FunctionToCall /*operation*/, TypeErasedState* /*from*/, TypeErasedState* /*to*/) noexcept + { + } + + // The manager that is used when a target function is in local storage and is + // a trivially copyable type. + inline void LocalManagerTrivial(FunctionToCall /*operation*/, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + // This single statement without branching handles both possible operations. + // + // For FunctionToCall::dispose, "from" and "to" point to the same state, and + // so this assignment logically would do nothing. + // + // Note: Correctness here relies on http://wg21.link/p0593, which has only + // become standard in C++20, though implementations do not break it in + // practice for earlier versions of C++. + // + // The correct way to do this without that paper is to first placement-new a + // default-constructed T in "to->storage" prior to the memmove, but doing so + // requires a different function to be created for each T that is stored + // locally, which can cause unnecessary bloat and be less cache friendly. + *to = *from; + + // Note: Because the type is trivially copyable, the destructor does not need + // to be called ("trivially copyable" requires a trivial destructor). + } + + // The manager that is used when a target function is in local storage and is + // not a trivially copyable type. 
+ template + void LocalManagerNontrivial(FunctionToCall operation, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + static_assert(IsStoredLocally::value, "Local storage must only be used for supported types."); + static_assert(!std::is_trivially_copyable::value, "Locally stored types must be trivially copyable."); + + T& from_object = (ObjectInLocalStorage)(from); + + switch (operation) + { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + ::new (static_cast(&to->storage)) T(std::move(from_object)); + ABSL_FALLTHROUGH_INTENDED; + case FunctionToCall::dispose: + from_object.~T(); // Must not throw. // NOLINT + return; + } + ABSL_UNREACHABLE(); + } + + // The invoker that is used when a target function is in local storage + // Note: QualTRef here is the target function type along with cv and reference + // qualifiers that must be used when calling the function. + template + ReturnType LocalInvoker( + TypeErasedState* const state, + ForwardedParameterType

... args + ) noexcept(SigIsNoexcept) + { + using RawT = RemoveCVRef; + static_assert( + IsStoredLocally::value, + "Target object must be in local storage in order to be invoked from it." + ); + + auto& f = (ObjectInLocalStorage)(state); + return (InvokeR)(static_cast(f), static_cast>(args)...); + } + + // The manager that is used when a target function is in remote storage and it + // has a trivial destructor + inline void RemoteManagerTrivial(FunctionToCall operation, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + switch (operation) + { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote = from->remote; + return; + case FunctionToCall::dispose: +#if defined(__cpp_sized_deallocation) + ::operator delete(from->remote.target, from->remote.size); +#else // __cpp_sized_deallocation + ::operator delete(from->remote.target); +#endif // __cpp_sized_deallocation + return; + } + ABSL_UNREACHABLE(); + } + + // The manager that is used when a target function is in remote storage and the + // destructor of the type is not trivial + template + void RemoteManagerNontrivial(FunctionToCall operation, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + static_assert(!IsStoredLocally::value, "Remote storage must only be used for types that do not " + "qualify for local storage."); + + switch (operation) + { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote.target = from->remote.target; + return; + case FunctionToCall::dispose: + ::delete static_cast(from->remote.target); // Must not throw. + return; + } + ABSL_UNREACHABLE(); + } + + // The invoker that is used when a target function is in remote storage + template + ReturnType RemoteInvoker( + TypeErasedState* const state, + ForwardedParameterType

... args + ) noexcept(SigIsNoexcept) + { + using RawT = RemoveCVRef; + static_assert(!IsStoredLocally::value, "Target object must be in remote storage in order to be " + "invoked from it."); + + auto& f = *static_cast(state->remote.target); + return (InvokeR)(static_cast(f), static_cast>(args)...); + } + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction that checks if a type T is an instantiation of + // absl::in_place_type_t (needed for constructor constraints of AnyInvocable). + template + struct IsInPlaceType : std::false_type + { + }; + + template + struct IsInPlaceType> : std::true_type + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // A constructor name-tag used with CoreImpl (below) to request the + // conversion-constructor. QualDecayedTRef is the decayed-type of the object to + // wrap, along with the cv and reference qualifiers that must be applied when + // performing an invocation of the wrapped object. + template + struct TypedConversionConstruct + { + }; + + // A helper base class for all core operations of AnyInvocable. Most notably, + // this class creates the function call operator and constraint-checkers so that + // the top-level class does not have to be a series of partial specializations. + // + // Note: This definition exists (as opposed to being a declaration) so that if + // the user of the top-level template accidentally passes a template argument + // that is not a function type, they will get a static_assert in AnyInvocable's + // class body rather than an error stating that Impl is not defined. + template + class Impl + { + }; // Note: This is partially-specialized later. + +// A std::unique_ptr deleter that deletes memory allocated via ::operator new. 
+#if defined(__cpp_sized_deallocation) + class TrivialDeleter + { + public: + explicit TrivialDeleter(std::size_t size) : + size_(size) + { + } + + void operator()(void* target) const + { + ::operator delete(target, size_); + } + + private: + std::size_t size_; + }; +#else // __cpp_sized_deallocation + class TrivialDeleter + { + public: + explicit TrivialDeleter(std::size_t) + { + } + + void operator()(void* target) const + { + ::operator delete(target); + } + }; +#endif // __cpp_sized_deallocation + + template + class CoreImpl; + + constexpr bool IsCompatibleConversion(void*, void*) + { + return false; + } + template + constexpr bool IsCompatibleConversion(CoreImpl*, CoreImpl*) + { + return !NoExceptDest || NoExceptSrc; + } + + // A helper base class for all core operations of AnyInvocable that do not + // depend on the cv/ref qualifiers of the function type. + template + class CoreImpl + { + public: + using result_type = ReturnType; + + CoreImpl() noexcept : + manager_(EmptyManager), + invoker_(nullptr) + { + } + + enum class TargetType + { + kPointer, + kCompatibleAnyInvocable, + kIncompatibleAnyInvocable, + kOther, + }; + + // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with + // the invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(TypedConversionConstruct, F&& f) + { + using DecayedT = RemoveCVRef; + + constexpr TargetType kTargetType = + (std::is_pointer::value || + std::is_member_pointer::value) ? + TargetType::kPointer : + IsCompatibleAnyInvocable::value ? TargetType::kCompatibleAnyInvocable : + IsAnyInvocable::value ? TargetType::kIncompatibleAnyInvocable : + TargetType::kOther; + // NOTE: We only use integers instead of enums as template parameters in + // order to work around a bug on C++14 under MSVC 2017. + // See b/236131881. 
+ Initialize(std::forward(f)); + } + + // Note: QualTRef here includes the cv-ref qualifiers associated with the + // invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(absl::in_place_type_t, Args&&... args) + { + InitializeStorage(std::forward(args)...); + } + + CoreImpl(CoreImpl&& other) noexcept + { + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + } + + CoreImpl& operator=(CoreImpl&& other) noexcept + { + // Put the left-hand operand in an empty state. + // + // Note: A full reset that leaves us with an object that has its invariants + // intact is necessary in order to handle self-move. This is required by + // types that are used with certain operations of the standard library, such + // as the default definition of std::swap when both operands target the same + // object. + Clear(); + + // Perform the actual move/destroy operation on the target function. + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + + return *this; + } + + ~CoreImpl() + { + manager_(FunctionToCall::dispose, &state_, &state_); + } + + // Check whether or not the AnyInvocable is in the empty state. + bool HasValue() const + { + return invoker_ != nullptr; + } + + // Effects: Puts the object into its empty state. + void Clear() + { + manager_(FunctionToCall::dispose, &state_, &state_); + manager_ = EmptyManager; + invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) + { +// This condition handles types that decay into pointers, which includes +// function references. Since function references cannot be null, GCC warns +// against comparing their decayed form with nullptr. 
+// Since this is template-heavy code, we prefer to disable these warnings +// locally instead of adding yet another overload of this function. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Waddress" +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#pragma GCC diagnostic push +#endif + if (static_cast>(f) == nullptr) + { +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + manager_ = EmptyManager; + invoker_ = nullptr; + return; + } + InitializeStorage(std::forward(f)); + } + + template = 0> + void Initialize(F&& f) + { + // In this case we can "steal the guts" of the other AnyInvocable. + f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_); + manager_ = f.manager_; + invoker_ = f.invoker_; + + f.manager_ = EmptyManager; + f.invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) + { + if (f.HasValue()) + { + InitializeStorage(std::forward(f)); + } + else + { + manager_ = EmptyManager; + invoker_ = nullptr; + } + } + + template> + void Initialize(F&& f) + { + InitializeStorage(std::forward(f)); + } + + // Use local (inline) storage for applicable target object types. + template>::value>> + void InitializeStorage(Args&&... args) + { + using RawT = RemoveCVRef; + ::new (static_cast(&state_.storage)) + RawT(std::forward(args)...); + + invoker_ = LocalInvoker; + // We can simplify our manager if we know the type is trivially copyable. + InitializeLocalManager(); + } + + // Use remote storage for target objects that cannot be stored locally. + template>::value, int> = 0> + void InitializeStorage(Args&&... args) + { + InitializeRemoteManager>(std::forward(args)...); + // This is set after everything else in case an exception is thrown in an + // earlier step of the initialization. 
+ invoker_ = RemoteInvoker; + } + + template::value>> + void InitializeLocalManager() + { + manager_ = LocalManagerTrivial; + } + + template::value, int> = 0> + void InitializeLocalManager() + { + manager_ = LocalManagerNontrivial; + } + + template + using HasTrivialRemoteStorage = + std::integral_constant::value && alignof(T) <= ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>; + + template::value>> + void InitializeRemoteManager(Args&&... args) + { + // unique_ptr is used for exception-safety in case construction throws. + std::unique_ptr uninitialized_target( + ::operator new(sizeof(T)), TrivialDeleter(sizeof(T)) + ); + ::new (uninitialized_target.get()) T(std::forward(args)...); + state_.remote.target = uninitialized_target.release(); + state_.remote.size = sizeof(T); + manager_ = RemoteManagerTrivial; + } + + template::value, int> = 0> + void InitializeRemoteManager(Args&&... args) + { + state_.remote.target = ::new T(std::forward(args)...); + manager_ = RemoteManagerNontrivial; + } + + ////////////////////////////////////////////////////////////////////////////// + // + // Type trait to determine if the template argument is an AnyInvocable whose + // function type is compatible enough with ours such that we can + // "move the guts" out of it when moving, rather than having to place a new + // object into remote storage. 
+ + template + struct IsCompatibleAnyInvocable + { + static constexpr bool value = false; + }; + + template + struct IsCompatibleAnyInvocable> + { + static constexpr bool value = + (IsCompatibleConversion)(static_cast::CoreImpl*>(nullptr), static_cast(nullptr)); + }; + + // + ////////////////////////////////////////////////////////////////////////////// + + TypeErasedState state_; + ManagerType* manager_; + InvokerType* invoker_; + }; + + // A constructor name-tag used with Impl to request the + // conversion-constructor + struct ConversionConstruct + { + }; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction that is normally an identity metafunction except that when + // given a std::reference_wrapper, it yields T&. This is necessary because + // currently std::reference_wrapper's operator() is not conditionally noexcept, + // so when checking if such an Invocable is nothrow-invocable, we must pull out + // the underlying type. + template + struct UnwrapStdReferenceWrapperImpl + { + using type = T; + }; + + template + struct UnwrapStdReferenceWrapperImpl> + { + using type = T&; + }; + + template + using UnwrapStdReferenceWrapper = + typename UnwrapStdReferenceWrapperImpl::type; + // + //////////////////////////////////////////////////////////////////////////////// + + // An alias that always yields std::true_type (used with constraints) where + // substitution failures happen when forming the template arguments. 
+ template + using TrueAlias = + std::integral_constant*) != 0>; + + /*SFINAE constraints for the conversion-constructor.*/ + template, AnyInvocable>::value>> + using CanConvert = TrueAlias< + absl::enable_if_t>::value>, + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, F>::value>>; + + /*SFINAE constraints for the std::in_place constructors.*/ + template + using CanEmplace = TrueAlias< + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, Args...>::value>>; + + /*SFINAE constraints for the conversion-assign operator.*/ + template, AnyInvocable>::value>> + using CanAssign = TrueAlias< + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, F>::value>>; + + /*SFINAE constraints for the reference-wrapper conversion-assign operator.*/ + template + using CanAssignReferenceWrapper = TrueAlias< + absl::enable_if_t< + Impl::template CallIsValid>::value>, + absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept< + std::reference_wrapper>::value>>; + +//////////////////////////////////////////////////////////////////////////////// +// +// The constraint for checking whether or not a call meets the noexcept +// callability requirements. This is a preprocessor macro because specifying it +// this way as opposed to a disjunction/branch can improve the user-side error +// messages and avoids an instantiation of std::is_nothrow_invocable_r in the +// cases where the user did not specify a noexcept function type. 
+// +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \ + ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals) + +// The disjunction below is because we can't rely on std::is_nothrow_invocable_r +// to give the right result when ReturnType is non-moveable in toolchains that +// don't treat non-moveable result types correctly. For example this was the +// case in libc++ before commit c3a24882 (2022-05). +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \ + absl::enable_if_t> inv_quals, \ + P...>, \ + std::conjunction< \ + std::is_nothrow_invocable< \ + UnwrapStdReferenceWrapper> inv_quals, \ + P...>, \ + std::is_same< \ + ReturnType, \ + absl::base_internal::invoke_result_t< \ + UnwrapStdReferenceWrapper> inv_quals, \ + P...>>>>::value> + +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals) +// +//////////////////////////////////////////////////////////////////////////////// + +// A macro to generate partial specializations of Impl with the different +// combinations of supported cv/reference qualifiers and noexcept specifier. +// +// Here, `cv` are the cv-qualifiers if any, `ref` is the ref-qualifier if any, +// inv_quals is the reference type to be used when invoking the target, and +// noex is "true" if the function type is noexcept, or false if it is not. +// +// The CallIsValid condition is more complicated than simply using +// absl::base_internal::is_invocable_r because we can't rely on it to give the +// right result when ReturnType is non-moveable in toolchains that don't treat +// non-moveable result types correctly. For example this was the case in libc++ +// before commit c3a24882 (2022-05). 
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \ + template \ + class Impl : public CoreImpl \ + { \ + public: \ + /*The base class, which contains the datamembers and core operations*/ \ + using Core = CoreImpl; \ + \ + /*SFINAE constraint to check if F is invocable with the proper signature*/ \ + template \ + using CallIsValid = TrueAlias inv_quals, P...>, \ + std::is_same inv_quals, P...>>>::value>>; \ + \ + /*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \ + template \ + using CallIsNoexceptIfSigIsNoexcept = \ + TrueAlias; \ + \ + /*Put the AnyInvocable into an empty state.*/ \ + Impl() = default; \ + \ + /*The implementation of a conversion-constructor from "f*/ \ + /*This forwards to Core, attaching inv_quals so that the base class*/ \ + /*knows how to properly type-erase the invocation.*/ \ + template \ + explicit Impl(ConversionConstruct, F&& f) : Core(TypedConversionConstruct::type inv_quals>(), std::forward(f)) \ + { \ + } \ + \ + /*Forward along the in-place construction parameters.*/ \ + template \ + explicit Impl(absl::in_place_type_t, Args&&... args) : Core(absl::in_place_type inv_quals>, std::forward(args)...) \ + { \ + } \ + \ + /*Raises a fatal error when the AnyInvocable is invoked after a move*/ \ + static ReturnType InvokedAfterMove( \ + TypeErasedState*, \ + ForwardedParameterType

... \ + ) noexcept(noex) \ + { \ + ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \ + std::terminate(); \ + } \ + \ + InvokerType* ExtractInvoker() cv \ + { \ + using QualifiedTestType = int cv ref; \ + auto* invoker = this->invoker_; \ + if (!std::is_const::value && \ + std::is_rvalue_reference::value) \ + { \ + ABSL_ASSERT([this]() { \ + /* We checked that this isn't const above, so const_cast is safe */ \ + const_cast(this)->invoker_ = InvokedAfterMove; \ + return this->HasValue(); }()); \ + } \ + return invoker; \ + } \ + \ + /*The actual invocation operation with the proper signature*/ \ + ReturnType operator()(P... args) cv ref noexcept(noex) \ + { \ + assert(this->invoker_ != nullptr); \ + return this->ExtractInvoker()( \ + const_cast(&this->state_), \ + static_cast>(args)... \ + ); \ + } \ + } + +// Define the `noexcept(true)` specialization only for C++17 and beyond, when +// `noexcept` is part of the type system. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +// A convenience macro that defines specializations for the noexcept(true) and +// noexcept(false) forms, given the other properties. +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true) +#else +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false) +#endif + + // Non-ref-qualified partial specializations + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &); + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&); + + // Lvalue-ref-qualified partial specializations + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &); + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&); + + // Rvalue-ref-qualified partial specializations + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&); + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&); + +// Undef the detail-only macros. 
+#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL +#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_ +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT +#undef ABSL_INTERNAL_NOEXCEPT_SPEC + + } // namespace internal_any_invocable + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h b/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h new file mode 100644 index 00000000..78e352f1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h @@ -0,0 +1,96 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implementation details for `absl::bind_front()`. + +#ifndef ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_ +#define ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_ + +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace functional_internal + { + + // Invoke the method, expanding the tuple of bound arguments. + template + R Apply(Tuple&& bound, absl::index_sequence, Args&&... 
free) + { + return base_internal::invoke( + absl::forward(bound).template get()..., + absl::forward(free)... + ); + } + + template + class FrontBinder + { + using BoundArgsT = absl::container_internal::CompressedTuple; + using Idx = absl::make_index_sequence; + + BoundArgsT bound_args_; + + public: + template + constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts) : + bound_args_(absl::forward(ts)...) + { + } + + template> + R operator()(FreeArgs&&... free_args) & + { + return functional_internal::Apply(bound_args_, Idx(), absl::forward(free_args)...); + } + + template> + R operator()(FreeArgs&&... free_args) const& + { + return functional_internal::Apply(bound_args_, Idx(), absl::forward(free_args)...); + } + + template> + R operator()(FreeArgs&&... free_args) && + { + // This overload is called when *this is an rvalue. If some of the bound + // arguments are stored by value or rvalue reference, we move them. + return functional_internal::Apply(absl::move(bound_args_), Idx(), absl::forward(free_args)...); + } + + template> + R operator()(FreeArgs&&... free_args) const&& + { + // This overload is called when *this is an rvalue. If some of the bound + // arguments are stored by value or rvalue reference, we move them. + return functional_internal::Apply(absl::move(bound_args_), Idx(), absl::forward(free_args)...); + } + }; + + template + using bind_front_t = FrontBinder, absl::decay_t...>; + + } // namespace functional_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h b/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h new file mode 100644 index 00000000..26e92d27 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h @@ -0,0 +1,127 @@ +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_ +#define ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_ + +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/functional/any_invocable.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace functional_internal + { + + // Like a void* that can handle function pointers as well. The standard does not + // allow function pointers to round-trip through void*, but void(*)() is fine. + // + // Note: It's important that this class remains trivial and is the same size as + // a pointer, since this allows the compiler to perform tail-call optimizations + // when the underlying function is a callable object with a matching signature. + union VoidPtr + { + const void* obj; + void (*fun)(); + }; + + // Chooses the best type for passing T as an argument. + // Attempt to be close to SystemV AMD64 ABI. Objects with trivial copy ctor are + // passed by value. 
+ template::value> + struct PassByValue : std::false_type + { + }; + + template + struct PassByValue : std::integral_constant::value && absl::is_trivially_copy_assignable::type>::value && std::is_trivially_destructible::value && sizeof(T) <= 2 * sizeof(void*)> + { + }; + + template + struct ForwardT : std::conditional::value, T, T&&> + { + }; + + // An Invoker takes a pointer to the type-erased invokable object, followed by + // the arguments that the invokable object expects. + // + // Note: The order of arguments here is an optimization, since member functions + // have an implicit "this" pointer as their first argument, putting VoidPtr + // first allows the compiler to perform tail-call optimization in many cases. + template + using Invoker = R (*)(VoidPtr, typename ForwardT::type...); + + // + // InvokeObject and InvokeFunction provide static "Invoke" functions that can be + // used as Invokers for objects or functions respectively. + // + // static_cast handles the case the return type is void. + template + R InvokeObject(VoidPtr ptr, typename ForwardT::type... args) + { + auto o = static_cast(ptr.obj); + return static_cast( + absl::base_internal::invoke(*o, std::forward(args)...) + ); + } + + template + R InvokeFunction(VoidPtr ptr, typename ForwardT::type... args) + { + auto f = reinterpret_cast(ptr.fun); + return static_cast( + absl::base_internal::invoke(f, std::forward(args)...) 
+ ); + } + + template + void AssertNonNull(const std::function& f) + { + assert(f != nullptr); + (void)f; + } + + template + void AssertNonNull(const AnyInvocable& f) + { + assert(f != nullptr); + (void)f; + } + + template + void AssertNonNull(const F&) + { + } + + template + void AssertNonNull(F C::*f) + { + assert(f != nullptr); + (void)f; + } + + template + using EnableIf = typename ::std::enable_if::type; + + } // namespace functional_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/hash.h b/CAPI/cpp/grpc/include/absl/hash/hash.h new file mode 100644 index 00000000..e34cb25f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/hash.h @@ -0,0 +1,439 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: hash.h +// ----------------------------------------------------------------------------- +// +// This header file defines the Abseil `hash` library and the Abseil hashing +// framework. This framework consists of the following: +// +// * The `absl::Hash` functor, which is used to invoke the hasher within the +// Abseil hashing framework. `absl::Hash` supports most basic types and +// a number of Abseil types out of the box. 
+// * `AbslHashValue`, an extension point that allows you to extend types to +// support Abseil hashing without requiring you to define a hashing +// algorithm. +// * `HashState`, a type-erased class which implements the manipulation of the +// hash state (H) itself; contains member functions `combine()`, +// `combine_contiguous()`, and `combine_unordered()`; and which you can use +// to contribute to an existing hash state when hashing your types. +// +// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework +// provides most of its utility by abstracting away the hash algorithm (and its +// implementation) entirely. Instead, a type invokes the Abseil hashing +// framework by simply combining its state with the state of known, hashable +// types. Hashing of that combined state is separately done by `absl::Hash`. +// +// One should assume that a hash algorithm is chosen randomly at the start of +// each process. E.g., `absl::Hash{}(9)` in one process and +// `absl::Hash{}(9)` in another process are likely to differ. +// +// `absl::Hash` may also produce different values from different dynamically +// loaded libraries. For this reason, `absl::Hash` values must never cross +// boundaries in dynamically loaded libraries (including when used in types like +// hash containers.) +// +// `absl::Hash` is intended to strongly mix input bits with a target of passing +// an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect). +// +// Example: +// +// // Suppose we have a class `Circle` for which we want to add hashing: +// class Circle { +// public: +// ... +// private: +// std::pair center_; +// int radius_; +// }; +// +// // To add hashing support to `Circle`, we simply need to add a free +// // (non-member) function `AbslHashValue()`, and return the combined hash +// // state of the existing hash state and the class state. 
You can add such a +// // free function using a friend declaration within the body of the class: +// class Circle { +// public: +// ... +// template +// friend H AbslHashValue(H h, const Circle& c) { +// return H::combine(std::move(h), c.center_, c.radius_); +// } +// ... +// }; +// +// For more information, see Adding Type Support to `absl::Hash` below. +// +#ifndef ABSL_HASH_HASH_H_ +#define ABSL_HASH_HASH_H_ + +#include +#include + +#include "absl/functional/function_ref.h" +#include "absl/hash/internal/hash.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // `absl::Hash` + // ----------------------------------------------------------------------------- + // + // `absl::Hash` is a convenient general-purpose hash functor for any type `T` + // satisfying any of the following conditions (in order): + // + // * T is an arithmetic or pointer type + // * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary + // hash state `H`. + // - T defines a specialization of `std::hash` + // + // `absl::Hash` intrinsically supports the following types: + // + // * All integral types (including bool) + // * All enum types + // * All floating-point types (although hashing them is discouraged) + // * All pointer types, including nullptr_t + // * std::pair, if T1 and T2 are hashable + // * std::tuple, if all the Ts... 
are hashable + // * std::unique_ptr and std::shared_ptr + // * All string-like types including: + // * absl::Cord + // * std::string (as well as any instance of std::basic_string that + // uses one of {char, wchar_t, char16_t, char32_t} and its associated + // std::char_traits) + // * std::string_view (as well as any instance of std::basic_string_view + // that uses one of {char, wchar_t, char16_t, char32_t} and its associated + // std::char_traits) + // * All the standard sequence containers (provided the elements are hashable) + // * All the standard associative containers (provided the elements are + // hashable) + // * absl types such as the following: + // * absl::string_view + // * absl::uint128 + // * absl::Time, absl::Duration, and absl::TimeZone + // * absl containers (provided the elements are hashable) such as the + // following: + // * absl::flat_hash_set, absl::node_hash_set, absl::btree_set + // * absl::flat_hash_map, absl::node_hash_map, absl::btree_map + // * absl::btree_multiset, absl::btree_multimap + // * absl::InlinedVector + // * absl::FixedArray + // + // When absl::Hash is used to hash an unordered container with a custom hash + // functor, the elements are hashed using default absl::Hash semantics, not + // the custom hash functor. This is consistent with the behavior of + // operator==() on unordered containers, which compares elements pairwise with + // operator==() rather than the custom equality functor. It is usually a + // mistake to use either operator==() or absl::Hash on unordered collections + // that use functors incompatible with operator==() equality. + // + // Note: the list above is not meant to be exhaustive. Additional type support + // may be added, in which case the above list will be updated. 
+ // + // ----------------------------------------------------------------------------- + // absl::Hash Invocation Evaluation + // ----------------------------------------------------------------------------- + // + // When invoked, `absl::Hash` searches for supplied hash functions in the + // following order: + // + // * Natively supported types out of the box (see above) + // * Types for which an `AbslHashValue()` overload is provided (such as + // user-defined types). See "Adding Type Support to `absl::Hash`" below. + // * Types which define a `std::hash` specialization + // + // The fallback to legacy hash functions exists mainly for backwards + // compatibility. If you have a choice, prefer defining an `AbslHashValue` + // overload instead of specializing any legacy hash functors. + // + // ----------------------------------------------------------------------------- + // The Hash State Concept, and using `HashState` for Type Erasure + // ----------------------------------------------------------------------------- + // + // The `absl::Hash` framework relies on the Concept of a "hash state." Such a + // hash state is used in several places: + // + // * Within existing implementations of `absl::Hash` to store the hashed + // state of an object. Note that it is up to the implementation how it stores + // such state. A hash table, for example, may mix the state to produce an + // integer value; a testing framework may simply hold a vector of that state. + // * Within implementations of `AbslHashValue()` used to extend user-defined + // types. (See "Adding Type Support to absl::Hash" below.) + // * Inside a `HashState`, providing type erasure for the concept of a hash + // state, which you can use to extend the `absl::Hash` framework for types + // that are otherwise difficult to extend using `AbslHashValue()`. (See the + // `HashState` class below.) 
+ // + // The "hash state" concept contains three member functions for mixing hash + // state: + // + // * `H::combine(state, values...)` + // + // Combines an arbitrary number of values into a hash state, returning the + // updated state. Note that the existing hash state is move-only and must be + // passed by value. + // + // Each of the value types T must be hashable by H. + // + // NOTE: + // + // state = H::combine(std::move(state), value1, value2, value3); + // + // must be guaranteed to produce the same hash expansion as + // + // state = H::combine(std::move(state), value1); + // state = H::combine(std::move(state), value2); + // state = H::combine(std::move(state), value3); + // + // * `H::combine_contiguous(state, data, size)` + // + // Combines a contiguous array of `size` elements into a hash state, + // returning the updated state. Note that the existing hash state is + // move-only and must be passed by value. + // + // NOTE: + // + // state = H::combine_contiguous(std::move(state), data, size); + // + // need NOT be guaranteed to produce the same hash expansion as a loop + // (it may perform internal optimizations). If you need this guarantee, use a + // loop instead. + // + // * `H::combine_unordered(state, begin, end)` + // + // Combines a set of elements denoted by an iterator pair into a hash + // state, returning the updated state. Note that the existing hash + // state is move-only and must be passed by value. + // + // Unlike the other two methods, the hashing is order-independent. + // This can be used to hash unordered collections. + // + // ----------------------------------------------------------------------------- + // Adding Type Support to `absl::Hash` + // ----------------------------------------------------------------------------- + // + // To add support for your user-defined type, add a proper `AbslHashValue()` + // overload as a free (non-member) function. 
The overload will take an + // existing hash state and should combine that state with state from the type. + // + // Example: + // + // template + // H AbslHashValue(H state, const MyType& v) { + // return H::combine(std::move(state), v.field1, ..., v.fieldN); + // } + // + // where `(field1, ..., fieldN)` are the members you would use on your + // `operator==` to define equality. + // + // Notice that `AbslHashValue` is not a class member, but an ordinary function. + // An `AbslHashValue` overload for a type should only be declared in the same + // file and namespace as said type. The proper `AbslHashValue` implementation + // for a given type will be discovered via ADL. + // + // Note: unlike `std::hash', `absl::Hash` should never be specialized. It must + // only be extended by adding `AbslHashValue()` overloads. + // + template + using Hash = absl::hash_internal::Hash; + + // HashOf + // + // absl::HashOf() is a helper that generates a hash from the values of its + // arguments. It dispatches to absl::Hash directly, as follows: + // * HashOf(t) == absl::Hash{}(t) + // * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c)) + // + // HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when + // * The argument lists have pairwise identical C++ types + // * a1 == b1 && a2 == b2 && ... + // + // The requirement that the arguments match in both type and value is critical. + // It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if + // `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`. + template + size_t HashOf(const Types&... values) + { + auto tuple = std::tie(values...); + return absl::Hash{}(tuple); + } + + // HashState + // + // A type erased version of the hash state concept, for use in user-defined + // `AbslHashValue` implementations that can't use templates (such as PImpl + // classes, virtual functions, etc.). The type erasure adds overhead so it + // should be avoided unless necessary. 
+ // + // Note: This wrapper will only erase calls to + // combine_contiguous(H, const unsigned char*, size_t) + // RunCombineUnordered(H, CombinerF) + // + // All other calls will be handled internally and will not invoke overloads + // provided by the wrapped class. + // + // Users of this class should still define a template `AbslHashValue` function, + // but can use `absl::HashState::Create(&state)` to erase the type of the hash + // state and dispatch to their private hashing logic. + // + // This state can be used like any other hash state. In particular, you can call + // `HashState::combine()` and `HashState::combine_contiguous()` on it. + // + // Example: + // + // class Interface { + // public: + // template + // friend H AbslHashValue(H state, const Interface& value) { + // state = H::combine(std::move(state), std::type_index(typeid(*this))); + // value.HashValue(absl::HashState::Create(&state)); + // return state; + // } + // private: + // virtual void HashValue(absl::HashState state) const = 0; + // }; + // + // class Impl : Interface { + // private: + // void HashValue(absl::HashState state) const override { + // absl::HashState::combine(std::move(state), v1_, v2_); + // } + // int v1_; + // std::string v2_; + // }; + class HashState : public hash_internal::HashStateBase + { + public: + // HashState::Create() + // + // Create a new `HashState` instance that wraps `state`. All calls to + // `combine()` and `combine_contiguous()` on the new instance will be + // redirected to the original `state` object. The `state` object must outlive + // the `HashState` instance. 
+ template + static HashState Create(T* state) + { + HashState s; + s.Init(state); + return s; + } + + HashState(const HashState&) = delete; + HashState& operator=(const HashState&) = delete; + HashState(HashState&&) = default; + HashState& operator=(HashState&&) = default; + + // HashState::combine() + // + // Combines an arbitrary number of values into a hash state, returning the + // updated state. + using HashState::HashStateBase::combine; + + // HashState::combine_contiguous() + // + // Combines a contiguous array of `size` elements into a hash state, returning + // the updated state. + static HashState combine_contiguous(HashState hash_state, const unsigned char* first, size_t size) + { + hash_state.combine_contiguous_(hash_state.state_, first, size); + return hash_state; + } + using HashState::HashStateBase::combine_contiguous; + + private: + HashState() = default; + + friend class HashState::HashStateBase; + + template + static void CombineContiguousImpl(void* p, const unsigned char* first, size_t size) + { + T& state = *static_cast(p); + state = T::combine_contiguous(std::move(state), first, size); + } + + template + void Init(T* state) + { + state_ = state; + combine_contiguous_ = &CombineContiguousImpl; + run_combine_unordered_ = &RunCombineUnorderedImpl; + } + + template + struct CombineUnorderedInvoker + { + template + void operator()(T inner_state, ConsumerT inner_cb) + { + f(HashState::Create(&inner_state), + [&](HashState& inner_erased) + { inner_cb(inner_erased.Real()); }); + } + + absl::FunctionRef)> f; + }; + + template + static HashState RunCombineUnorderedImpl( + HashState state, + absl::FunctionRef)> + f + ) + { + // Note that this implementation assumes that inner_state and outer_state + // are the same type. This isn't true in the SpyHash case, but SpyHash + // types are move-convertible to each other, so this still works. 
+ T& real_state = state.Real(); + real_state = T::RunCombineUnordered( + std::move(real_state), CombineUnorderedInvoker{f} + ); + return state; + } + + template + static HashState RunCombineUnordered(HashState state, CombinerT combiner) + { + auto* run = state.run_combine_unordered_; + return run(std::move(state), std::ref(combiner)); + } + + // Do not erase an already erased state. + void Init(HashState* state) + { + state_ = state->state_; + combine_contiguous_ = state->combine_contiguous_; + run_combine_unordered_ = state->run_combine_unordered_; + } + + template + T& Real() + { + return *static_cast(state_); + } + + void* state_; + void (*combine_contiguous_)(void*, const unsigned char*, size_t); + HashState (*run_combine_unordered_)( + HashState state, + absl::FunctionRef)> + ); + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_HASH_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/hash_testing.h b/CAPI/cpp/grpc/include/absl/hash/hash_testing.h new file mode 100644 index 00000000..5852ded3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/hash_testing.h @@ -0,0 +1,430 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_HASH_HASH_TESTING_H_ +#define ABSL_HASH_HASH_TESTING_H_ + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/hash/internal/spy_hash_state.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/variant.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Run the absl::Hash algorithm over all the elements passed in and verify that + // their hash expansion is congruent with their `==` operator. + // + // It is used in conjunction with EXPECT_TRUE. Failures will output information + // on what requirement failed and on which objects. + // + // Users should pass a collection of types as either an initializer list or a + // container of cases. + // + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + // {v1, v2, ..., vN})); + // + // std::vector cases; + // // Fill cases... + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); + // + // Users can pass a variety of types for testing heterogeneous lookup with + // `std::make_tuple`: + // + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + // std::make_tuple(v1, v2, ..., vN))); + // + // + // Ideally, the values passed should provide enough coverage of the `==` + // operator and the AbslHashValue implementations. + // For dynamically sized types, the empty state should usually be included in + // the values. + // + // The function accepts an optional comparator function, in case that `==` is + // not enough for the values provided. + // + // Usage: + // + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + // std::make_tuple(v1, v2, ..., vN), MyCustomEq{})); + // + // It checks the following requirements: + // 1. The expansion for a value is deterministic. + // 2. For any two objects `a` and `b` in the sequence, if `a == b` evaluates + // to true, then their hash expansion must be equal. + // 3. 
If `a == b` evaluates to false their hash expansion must be unequal. + // 4. If `a == b` evaluates to false neither hash expansion can be a + // suffix of the other. + // 5. AbslHashValue overloads should not be called by the user. They are only + // meant to be called by the framework. Users should call H::combine() and + // H::combine_contiguous(). + // 6. No moved-from instance of the hash state is used in the implementation + // of AbslHashValue. + // + // The values do not have to have the same type. This can be useful for + // equivalent types that support heterogeneous lookup. + // + // A possible reason for breaking (2) is combining state in the hash expansion + // that was not used in `==`. + // For example: + // + // struct Bad2 { + // int a, b; + // template + // friend H AbslHashValue(H state, Bad2 x) { + // // Uses a and b. + // return H::combine(std::move(state), x.a, x.b); + // } + // friend bool operator==(Bad2 x, Bad2 y) { + // // Only uses a. + // return x.a == y.a; + // } + // }; + // + // As for (3), breaking this usually means that there is state being passed to + // the `==` operator that is not used in the hash expansion. + // For example: + // + // struct Bad3 { + // int a, b; + // template + // friend H AbslHashValue(H state, Bad3 x) { + // // Only uses a. + // return H::combine(std::move(state), x.a); + // } + // friend bool operator==(Bad3 x, Bad3 y) { + // // Uses a and b. + // return x.a == y.a && x.b == y.b; + // } + // }; + // + // Finally, a common way to break 4 is by combining dynamic ranges without + // combining the size of the range. + // For example: + // + // struct Bad4 { + // int *p, size; + // template + // friend H AbslHashValue(H state, Bad4 x) { + // return H::combine_contiguous(std::move(state), x.p, x.p + x.size); + // } + // friend bool operator==(Bad4 x, Bad4 y) { + // // Compare two ranges for equality. C++14 code can instead use std::equal. 
+ // return absl::equal(x.p, x.p + x.size, y.p, y.p + y.size); + // } + // }; + // + // An easy solution to this is to combine the size after combining the range, + // like so: + // template + // friend H AbslHashValue(H state, Bad4 x) { + // return H::combine( + // H::combine_contiguous(std::move(state), x.p, x.p + x.size), x.size); + // } + // + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values); + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals); + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values); + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values, Eq equals); + + namespace hash_internal + { + + struct PrintVisitor + { + size_t index; + template + std::string operator()(const T* value) const + { + return absl::StrCat("#", index, "(", testing::PrintToString(*value), ")"); + } + }; + + template + struct EqVisitor + { + Eq eq; + template + bool operator()(const T* t, const U* u) const + { + return eq(*t, *u); + } + }; + + struct ExpandVisitor + { + template + SpyHashState operator()(const T* value) const + { + return SpyHashState::combine(SpyHashState(), *value); + } + }; + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) + { + using V = typename Container::value_type; + + struct Info + { + const V& value; + size_t index; + std::string ToString() const + { + return absl::visit(PrintVisitor{index}, value); + } + SpyHashState expand() const + { + return absl::visit(ExpandVisitor{}, value); + } + }; + + using EqClass = std::vector; + std::vector classes; + + // Gather the values in equivalence classes. 
+ size_t i = 0; + for (const auto& value : values) + { + EqClass* c = nullptr; + for (auto& eqclass : classes) + { + if (absl::visit(EqVisitor{equals}, value, eqclass[0].value)) + { + c = &eqclass; + break; + } + } + if (c == nullptr) + { + classes.emplace_back(); + c = &classes.back(); + } + c->push_back({value, i}); + ++i; + + // Verify potential errors captured by SpyHashState. + if (auto error = c->back().expand().error()) + { + return testing::AssertionFailure() << *error; + } + } + + if (classes.size() < 2) + { + return testing::AssertionFailure() + << "At least two equivalence classes are expected."; + } + + // We assume that equality is correctly implemented. + // Now we verify that AbslHashValue is also correctly implemented. + + for (const auto& c : classes) + { + // All elements of the equivalence class must have the same hash + // expansion. + const SpyHashState expected = c[0].expand(); + for (const Info& v : c) + { + if (v.expand() != v.expand()) + { + return testing::AssertionFailure() + << "Hash expansion for " << v.ToString() + << " is non-deterministic."; + } + if (v.expand() != expected) + { + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << v.ToString() + << " evaluate as equal but have an unequal hash expansion."; + } + } + + // Elements from other classes must have different hash expansion. 
+ for (const auto& c2 : classes) + { + if (&c == &c2) + continue; + const SpyHashState c2_hash = c2[0].expand(); + switch (SpyHashState::Compare(expected, c2_hash)) + { + case SpyHashState::CompareResult::kEqual: + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << c2[0].ToString() + << " evaluate as unequal but have an equal hash expansion."; + case SpyHashState::CompareResult::kBSuffixA: + return testing::AssertionFailure() + << "Hash expansion of " << c2[0].ToString() + << " is a suffix of the hash expansion of " << c[0].ToString() + << "."; + case SpyHashState::CompareResult::kASuffixB: + return testing::AssertionFailure() + << "Hash expansion of " << c[0].ToString() + << " is a suffix of the hash expansion of " << c2[0].ToString() + << "."; + case SpyHashState::CompareResult::kUnequal: + break; + } + } + } + return testing::AssertionSuccess(); + } + + template + struct TypeSet + { + template...>::value> + struct Insert + { + using type = TypeSet; + }; + template + struct Insert + { + using type = TypeSet; + }; + + template class C> + using apply = C; + }; + + template + struct MakeTypeSet : TypeSet<> + { + }; + template + struct MakeTypeSet : MakeTypeSet::template Insert::type + { + }; + + template + using VariantForTypes = typename MakeTypeSet< + const typename std::decay::type*...>::template apply; + + template + struct ContainerAsVector + { + using V = absl::variant; + using Out = std::vector; + + static Out Do(const Container& values) + { + Out out; + for (const auto& v : values) + out.push_back(&v); + return out; + } + }; + + template + struct ContainerAsVector> + { + using V = VariantForTypes; + using Out = std::vector; + + template + static Out DoImpl(const std::tuple& tuple, absl::index_sequence) + { + return Out{&std::get(tuple)...}; + } + + static Out Do(const std::tuple& values) + { + return DoImpl(values, absl::index_sequence_for()); + } + }; + + template<> + struct ContainerAsVector> + { + static std::vector> 
Do(std::tuple<>) + { + return {}; + } + }; + + struct DefaultEquals + { + template + bool operator()(const T& t, const U& u) const + { + return t == u; + } + }; + + } // namespace hash_internal + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector::Do(values), + hash_internal::DefaultEquals{} + ); + } + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector::Do(values), equals + ); + } + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector>::Do(values), + hash_internal::DefaultEquals{} + ); + } + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values, Eq equals) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector>::Do(values), + equals + ); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_HASH_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/internal/city.h b/CAPI/cpp/grpc/include/absl/hash/internal/city.h new file mode 100644 index 00000000..06394243 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/internal/city.h @@ -0,0 +1,79 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// https://code.google.com/p/cityhash/ +// +// This file provides a few functions for hashing strings. All of them are +// high-quality functions in the sense that they pass standard tests such +// as Austin Appleby's SMHasher. They are also fast. +// +// For 64-bit x86 code, on short strings, we don't know of anything faster than +// CityHash64 that is of comparable quality. We believe our nearest competitor +// is Murmur3. For 64-bit x86 code, CityHash64 is an excellent choice for hash +// tables and most other hashing (excluding cryptography). +// +// For 32-bit x86 code, we don't know of anything faster than CityHash32 that +// is of comparable quality. We believe our nearest competitor is Murmur3A. +// (On 64-bit CPUs, it is typically faster to use the other CityHash variants.) +// +// Functions in the CityHash family are not suitable for cryptography. +// +// Please see CityHash's README file for more details on our performance +// measurements and so on. +// +// WARNING: This code has been only lightly tested on big-endian platforms! +// It is known to work well on little-endian platforms that have a small penalty +// for unaligned reads, such as current Intel and AMD moderate-to-high-end CPUs. +// It should work on all 32-bit and 64-bit platforms that allow unaligned reads; +// bug reports are welcome. +// +// By the way, for some hash functions, given strings a and b, the hash +// of a+b is easily derived from the hashes of a and b. This property +// doesn't hold for any hash functions in this file. 
+ +#ifndef ABSL_HASH_INTERNAL_CITY_H_ +#define ABSL_HASH_INTERNAL_CITY_H_ + +#include +#include // for size_t. + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace hash_internal + { + + // Hash function for a byte array. + uint64_t CityHash64(const char* s, size_t len); + + // Hash function for a byte array. For convenience, a 64-bit seed is also + // hashed into the result. + uint64_t CityHash64WithSeed(const char* s, size_t len, uint64_t seed); + + // Hash function for a byte array. For convenience, two seeds are also + // hashed into the result. + uint64_t CityHash64WithSeeds(const char* s, size_t len, uint64_t seed0, uint64_t seed1); + + // Hash function for a byte array. Most useful in 32-bit binaries. + uint32_t CityHash32(const char* s, size_t len); + + } // namespace hash_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_INTERNAL_CITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/internal/hash.h b/CAPI/cpp/grpc/include/absl/hash/internal/hash.h new file mode 100644 index 00000000..5047530f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/internal/hash.h @@ -0,0 +1,1437 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: hash.h +// ----------------------------------------------------------------------------- +// +#ifndef ABSL_HASH_INTERNAL_HASH_H_ +#define ABSL_HASH_INTERNAL_HASH_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/port.h" +#include "absl/container/fixed_array.h" +#include "absl/hash/internal/city.h" +#include "absl/hash/internal/low_level_hash.h" +#include "absl/meta/type_traits.h" +#include "absl/numeric/bits.h" +#include "absl/numeric/int128.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "absl/types/variant.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_HAVE_STD_STRING_VIEW +#include +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class HashState; + + namespace hash_internal + { + + // Internal detail: Large buffers are hashed in smaller chunks. This function + // returns the size of these chunks. + constexpr size_t PiecewiseChunkSize() + { + return 1024; + } + + // PiecewiseCombiner + // + // PiecewiseCombiner is an internal-only helper class for hashing a piecewise + // buffer of `char` or `unsigned char` as though it were contiguous. This class + // provides two methods: + // + // H add_buffer(state, data, size) + // H finalize(state) + // + // `add_buffer` can be called zero or more times, followed by a single call to + // `finalize`. This will produce the same hash expansion as concatenating each + // buffer piece into a single contiguous buffer, and passing this to + // `H::combine_contiguous`. 
+ // + // Example usage: + // PiecewiseCombiner combiner; + // for (const auto& piece : pieces) { + // state = combiner.add_buffer(std::move(state), piece.data, piece.size); + // } + // return combiner.finalize(std::move(state)); + class PiecewiseCombiner + { + public: + PiecewiseCombiner() : + position_(0) + { + } + PiecewiseCombiner(const PiecewiseCombiner&) = delete; + PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete; + + // PiecewiseCombiner::add_buffer() + // + // Appends the given range of bytes to the sequence to be hashed, which may + // modify the provided hash state. + template + H add_buffer(H state, const unsigned char* data, size_t size); + template + H add_buffer(H state, const char* data, size_t size) + { + return add_buffer(std::move(state), reinterpret_cast(data), size); + } + + // PiecewiseCombiner::finalize() + // + // Finishes combining the hash sequence, which may may modify the provided + // hash state. + // + // Once finalize() is called, add_buffer() may no longer be called. The + // resulting hash state will be the same as if the pieces passed to + // add_buffer() were concatenated into a single flat buffer, and then provided + // to H::combine_contiguous(). + template + H finalize(H state); + + private: + unsigned char buf_[PiecewiseChunkSize()]; + size_t position_; + }; + + // is_hashable() + // + // Trait class which returns true if T is hashable by the absl::Hash framework. + // Used for the AbslHashValue implementations for composite types below. + template + struct is_hashable; + + // HashStateBase + // + // An internal implementation detail that contains common implementation details + // for all of the "hash state objects" objects generated by Abseil. This is not + // a public API; users should not create classes that inherit from this. + // + // A hash state object is the template argument `H` passed to `AbslHashValue`. + // It represents an intermediate state in the computation of an unspecified hash + // algorithm. 
`HashStateBase` provides a CRTP style base class for hash state + // implementations. Developers adding type support for `absl::Hash` should not + // rely on any parts of the state object other than the following member + // functions: + // + // * HashStateBase::combine() + // * HashStateBase::combine_contiguous() + // * HashStateBase::combine_unordered() + // + // A derived hash state class of type `H` must provide a public member function + // with a signature similar to the following: + // + // `static H combine_contiguous(H state, const unsigned char*, size_t)`. + // + // It must also provide a private template method named RunCombineUnordered. + // + // A "consumer" is a 1-arg functor returning void. Its argument is a reference + // to an inner hash state object, and it may be called multiple times. When + // called, the functor consumes the entropy from the provided state object, + // and resets that object to its empty state. + // + // A "combiner" is a stateless 2-arg functor returning void. Its arguments are + // an inner hash state object and an ElementStateConsumer functor. A combiner + // uses the provided inner hash state object to hash each element of the + // container, passing the inner hash state object to the consumer after hashing + // each element. + // + // Given these definitions, a derived hash state class of type H + // must provide a private template method with a signature similar to the + // following: + // + // `template ` + // `static H RunCombineUnordered(H outer_state, CombinerT combiner)` + // + // This function is responsible for constructing the inner state object and + // providing a consumer to the combiner. It uses side effects of the consumer + // and combiner to mix the state of each element in an order-independent manner, + // and uses this to return an updated value of `outer_state`. 
+ // + // This inside-out approach generates efficient object code in the normal case, + // but allows us to use stack storage to implement the absl::HashState type + // erasure mechanism (avoiding heap allocations while hashing). + // + // `HashStateBase` will provide a complete implementation for a hash state + // object in terms of these two methods. + // + // Example: + // + // // Use CRTP to define your derived class. + // struct MyHashState : HashStateBase { + // static H combine_contiguous(H state, const unsigned char*, size_t); + // using MyHashState::HashStateBase::combine; + // using MyHashState::HashStateBase::combine_contiguous; + // using MyHashState::HashStateBase::combine_unordered; + // private: + // template + // static H RunCombineUnordered(H state, CombinerT combiner); + // }; + template + class HashStateBase + { + public: + // HashStateBase::combine() + // + // Combines an arbitrary number of values into a hash state, returning the + // updated state. + // + // Each of the value types `T` must be separately hashable by the Abseil + // hashing framework. + // + // NOTE: + // + // state = H::combine(std::move(state), value1, value2, value3); + // + // is guaranteed to produce the same hash expansion as: + // + // state = H::combine(std::move(state), value1); + // state = H::combine(std::move(state), value2); + // state = H::combine(std::move(state), value3); + template + static H combine(H state, const T& value, const Ts&... values); + static H combine(H state) + { + return state; + } + + // HashStateBase::combine_contiguous() + // + // Combines a contiguous array of `size` elements into a hash state, returning + // the updated state. + // + // NOTE: + // + // state = H::combine_contiguous(std::move(state), data, size); + // + // is NOT guaranteed to produce the same hash expansion as a for-loop (it may + // perform internal optimizations). If you need this guarantee, use the + // for-loop instead. 
+ template + static H combine_contiguous(H state, const T* data, size_t size); + + template + static H combine_unordered(H state, I begin, I end); + + using AbslInternalPiecewiseCombiner = PiecewiseCombiner; + + template + using is_hashable = absl::hash_internal::is_hashable; + + private: + // Common implementation of the iteration step of a "combiner", as described + // above. + template + struct CombineUnorderedCallback + { + I begin; + I end; + + template + void operator()(InnerH inner_state, ElementStateConsumer cb) + { + for (; begin != end; ++begin) + { + inner_state = H::combine(std::move(inner_state), *begin); + cb(inner_state); + } + } + }; + }; + + // is_uniquely_represented + // + // `is_uniquely_represented` is a trait class that indicates whether `T` + // is uniquely represented. + // + // A type is "uniquely represented" if two equal values of that type are + // guaranteed to have the same bytes in their underlying storage. In other + // words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be + // zero. This property cannot be detected automatically, so this trait is false + // by default, but can be specialized by types that wish to assert that they are + // uniquely represented. This makes them eligible for certain optimizations. + // + // If you have any doubt whatsoever, do not specialize this template. + // The default is completely safe, and merely disables some optimizations + // that will not matter for most types. Specializing this template, + // on the other hand, can be very hazardous. + // + // To be uniquely represented, a type must not have multiple ways of + // representing the same value; for example, float and double are not + // uniquely represented, because they have distinct representations for + // +0 and -0. Furthermore, the type's byte representation must consist + // solely of user-controlled data, with no padding bits and no compiler- + // controlled data such as vptrs or sanitizer metadata. 
This is usually + // very difficult to guarantee, because in most cases the compiler can + // insert data and padding bits at its own discretion. + // + // If you specialize this template for a type `T`, you must do so in the file + // that defines that type (or in this file). If you define that specialization + // anywhere else, `is_uniquely_represented` could have different meanings + // in different places. + // + // The Enable parameter is meaningless; it is provided as a convenience, + // to support certain SFINAE techniques when defining specializations. + template + struct is_uniquely_represented : std::false_type + { + }; + + // is_uniquely_represented + // + // unsigned char is a synonym for "byte", so it is guaranteed to be + // uniquely represented. + template<> + struct is_uniquely_represented : std::true_type + { + }; + + // is_uniquely_represented for non-standard integral types + // + // Integral types other than bool should be uniquely represented on any + // platform that this will plausibly be ported to. + template + struct is_uniquely_represented< + Integral, + typename std::enable_if::value>::type> : std::true_type + { + }; + + // is_uniquely_represented + // + // + template<> + struct is_uniquely_represented : std::false_type + { + }; + + // hash_bytes() + // + // Convenience function that combines `hash_state` with the byte representation + // of `value`. + template + H hash_bytes(H hash_state, const T& value) + { + const unsigned char* start = reinterpret_cast(&value); + return H::combine_contiguous(std::move(hash_state), start, sizeof(value)); + } + + // ----------------------------------------------------------------------------- + // AbslHashValue for Basic Types + // ----------------------------------------------------------------------------- + + // Note: Default `AbslHashValue` implementations live in `hash_internal`. This + // allows us to block lexical scope lookup when doing an unqualified call to + // `AbslHashValue` below. 
User-defined implementations of `AbslHashValue` can + // only be found via ADL. + + // AbslHashValue() for hashing bool values + // + // We use SFINAE to ensure that this overload only accepts bool, not types that + // are convertible to bool. + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, B value + ) + { + return H::combine(std::move(hash_state), static_cast(value ? 1 : 0)); + } + + // AbslHashValue() for hashing enum values + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, Enum e + ) + { + // In practice, we could almost certainly just invoke hash_bytes directly, + // but it's possible that a sanitizer might one day want to + // store data in the unused bits of an enum. To avoid that risk, we + // convert to the underlying type before hashing. Hopefully this will get + // optimized away; if not, we can reopen discussion with c-toolchain-team. + return H::combine(std::move(hash_state), static_cast::type>(e)); + } + // AbslHashValue() for hashing floating-point values + template + typename std::enable_if::value || std::is_same::value, H>::type + AbslHashValue(H hash_state, Float value) + { + return hash_internal::hash_bytes(std::move(hash_state), value == 0 ? 0 : value); + } + + // Long double has the property that it might have extra unused bytes in it. + // For example, in x86 sizeof(long double)==16 but it only really uses 80-bits + // of it. This means we can't use hash_bytes on a long double and have to + // convert it to something else first. + template + typename std::enable_if::value, H>::type + AbslHashValue(H hash_state, LongDouble value) + { + const int category = std::fpclassify(value); + switch (category) + { + case FP_INFINITE: + // Add the sign bit to differentiate between +Inf and -Inf + hash_state = H::combine(std::move(hash_state), std::signbit(value)); + break; + + case FP_NAN: + case FP_ZERO: + default: + // Category is enough for these. 
+ break; + + case FP_NORMAL: + case FP_SUBNORMAL: + // We can't convert `value` directly to double because this would have + // undefined behavior if the value is out of range. + // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is + // guaranteed to be in range for `double`. The truncation is + // implementation defined, but that works as long as it is deterministic. + int exp; + auto mantissa = static_cast(std::frexp(value, &exp)); + hash_state = H::combine(std::move(hash_state), mantissa, exp); + } + + return H::combine(std::move(hash_state), category); + } + + // AbslHashValue() for hashing pointers + template + H AbslHashValue(H hash_state, T* ptr) + { + auto v = reinterpret_cast(ptr); + // Due to alignment, pointers tend to have low bits as zero, and the next few + // bits follow a pattern since they are also multiples of some base value. + // Mixing the pointer twice helps prevent stuck low bits for certain alignment + // values. + return H::combine(std::move(hash_state), v, v); + } + + // AbslHashValue() for hashing nullptr_t + template + H AbslHashValue(H hash_state, std::nullptr_t) + { + return H::combine(std::move(hash_state), static_cast(nullptr)); + } + + // AbslHashValue() for hashing pointers-to-member + template + H AbslHashValue(H hash_state, T C::*ptr) + { + auto salient_ptm_size = [](std::size_t n) -> std::size_t + { +#if defined(_MSC_VER) + // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2, + // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain + // padding (namely when they have 1 or 3 ints). The value below is a lower + // bound on the number of salient, non-padding bytes that we use for + // hashing. + if (alignof(T C::*) == alignof(int)) + { + // No padding when all subobjects have the same size as the total + // alignment. This happens in 32-bit mode. + return n; + } + else + { + // Padding for 1 int (size 16) or 3 ints (size 24). 
+ // With 2 ints, the size is 16 with no padding, which we pessimize. + return n == 24 ? 20 : n == 16 ? 12 : + n; + } +#else + // On other platforms, we assume that pointers-to-members do not have + // padding. +#ifdef __cpp_lib_has_unique_object_representations + static_assert(std::has_unique_object_representations::value); +#endif // __cpp_lib_has_unique_object_representations + return n; +#endif + }; + return H::combine_contiguous(std::move(hash_state), reinterpret_cast(&ptr), salient_ptm_size(sizeof ptr)); + } + + // ----------------------------------------------------------------------------- + // AbslHashValue for Composite Types + // ----------------------------------------------------------------------------- + + // AbslHashValue() for hashing pairs + template + typename std::enable_if::value && is_hashable::value, H>::type + AbslHashValue(H hash_state, const std::pair& p) + { + return H::combine(std::move(hash_state), p.first, p.second); + } + + // hash_tuple() + // + // Helper function for hashing a tuple. The third argument should + // be an index_sequence running from 0 to tuple_size - 1. + template + H hash_tuple(H hash_state, const Tuple& t, absl::index_sequence) + { + return H::combine(std::move(hash_state), std::get(t)...); + } + + // AbslHashValue for hashing tuples + template +#if defined(_MSC_VER) + // This SFINAE gets MSVC confused under some conditions. Let's just disable it + // for now. 
+ H +#else // _MSC_VER + typename std::enable_if...>::value, H>::type +#endif // _MSC_VER + AbslHashValue(H hash_state, const std::tuple& t) + { + return hash_internal::hash_tuple(std::move(hash_state), t, absl::make_index_sequence()); + } + + // ----------------------------------------------------------------------------- + // AbslHashValue for Pointers + // ----------------------------------------------------------------------------- + + // AbslHashValue for hashing unique_ptr + template + H AbslHashValue(H hash_state, const std::unique_ptr& ptr) + { + return H::combine(std::move(hash_state), ptr.get()); + } + + // AbslHashValue for hashing shared_ptr + template + H AbslHashValue(H hash_state, const std::shared_ptr& ptr) + { + return H::combine(std::move(hash_state), ptr.get()); + } + + // ----------------------------------------------------------------------------- + // AbslHashValue for String-Like Types + // ----------------------------------------------------------------------------- + + // AbslHashValue for hashing strings + // + // All the string-like types supported here provide the same hash expansion for + // the same character sequence. These types are: + // + // - `absl::Cord` + // - `std::string` (and std::basic_string, A> for + // any allocator A and any T in {char, wchar_t, char16_t, char32_t}) + // - `absl::string_view`, `std::string_view`, `std::wstring_view`, + // `std::u16string_view`, and `std::u32_string_view`. + // + // For simplicity, we currently support only strings built on `char`, `wchar_t`, + // `char16_t`, or `char32_t`. This support may be broadened, if necessary, but + // with some caution - this overload would misbehave in cases where the traits' + // `eq()` member isn't equivalent to `==` on the underlying character type. 
+ template + H AbslHashValue(H hash_state, absl::string_view str) + { + return H::combine( + H::combine_contiguous(std::move(hash_state), str.data(), str.size()), + str.size() + ); + } + + // Support std::wstring, std::u16string and std::u32string. + template::value || std::is_same::value || std::is_same::value>> + H AbslHashValue( + H hash_state, + const std::basic_string, Alloc>& str + ) + { + return H::combine( + H::combine_contiguous(std::move(hash_state), str.data(), str.size()), + str.size() + ); + } + +#ifdef ABSL_HAVE_STD_STRING_VIEW + + // Support std::wstring_view, std::u16string_view and std::u32string_view. + template::value || std::is_same::value || std::is_same::value>> + H AbslHashValue(H hash_state, std::basic_string_view str) + { + return H::combine( + H::combine_contiguous(std::move(hash_state), str.data(), str.size()), + str.size() + ); + } + +#endif // ABSL_HAVE_STD_STRING_VIEW + + // ----------------------------------------------------------------------------- + // AbslHashValue for Sequence Containers + // ----------------------------------------------------------------------------- + + // AbslHashValue for hashing std::array + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::array& array + ) + { + return H::combine_contiguous(std::move(hash_state), array.data(), array.size()); + } + + // AbslHashValue for hashing std::deque + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::deque& deque + ) + { + // TODO(gromer): investigate a more efficient implementation taking + // advantage of the chunk structure. 
+ for (const auto& t : deque) + { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), deque.size()); + } + + // AbslHashValue for hashing std::forward_list + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::forward_list& list + ) + { + size_t size = 0; + for (const T& t : list) + { + hash_state = H::combine(std::move(hash_state), t); + ++size; + } + return H::combine(std::move(hash_state), size); + } + + // AbslHashValue for hashing std::list + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::list& list + ) + { + for (const auto& t : list) + { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), list.size()); + } + + // AbslHashValue for hashing std::vector + // + // Do not use this for vector on platforms that have a working + // implementation of std::hash. It does not have a .data(), and a fallback for + // std::hash<> is most likely faster. + template + typename std::enable_if::value && !std::is_same::value, H>::type + AbslHashValue(H hash_state, const std::vector& vector) + { + return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(), vector.size()), vector.size()); + } + + // AbslHashValue special cases for hashing std::vector + +#if defined(ABSL_IS_BIG_ENDIAN) && \ + (defined(__GLIBCXX__) || defined(__GLIBCPP__)) + + // std::hash in libstdc++ does not work correctly with vector on Big + // Endian platforms therefore we need to implement a custom AbslHashValue for + // it. 
More details on the bug: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531 + template + typename std::enable_if::value && std::is_same::value, H>::type + AbslHashValue(H hash_state, const std::vector& vector) + { + typename H::AbslInternalPiecewiseCombiner combiner; + for (const auto& i : vector) + { + unsigned char c = static_cast(i); + hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c)); + } + return H::combine(combiner.finalize(std::move(hash_state)), vector.size()); + } +#else + // When not working around the libstdc++ bug above, we still have to contend + // with the fact that std::hash> is often poor quality, hashing + // directly on the internal words and on no other state. On these platforms, + // vector{1, 1} and vector{1, 1, 0} hash to the same value. + // + // Mixing in the size (as we do in our other vector<> implementations) on top + // of the library-provided hash implementation avoids this QOI issue. + template + typename std::enable_if::value && std::is_same::value, H>::type + AbslHashValue(H hash_state, const std::vector& vector) + { + return H::combine(std::move(hash_state), std::hash>{}(vector), vector.size()); + } +#endif + + // ----------------------------------------------------------------------------- + // AbslHashValue for Ordered Associative Containers + // ----------------------------------------------------------------------------- + + // AbslHashValue for hashing std::map + template + typename std::enable_if::value && is_hashable::value, H>::type + AbslHashValue(H hash_state, const std::map& map) + { + for (const auto& t : map) + { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), map.size()); + } + + // AbslHashValue for hashing std::multimap + template + typename std::enable_if::value && is_hashable::value, H>::type + AbslHashValue(H hash_state, const std::multimap& map) + { + for (const auto& t : map) + { + hash_state = H::combine(std::move(hash_state), t); + 
} + return H::combine(std::move(hash_state), map.size()); + } + + // AbslHashValue for hashing std::set + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::set& set + ) + { + for (const auto& t : set) + { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), set.size()); + } + + // AbslHashValue for hashing std::multiset + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::multiset& set + ) + { + for (const auto& t : set) + { + hash_state = H::combine(std::move(hash_state), t); + } + return H::combine(std::move(hash_state), set.size()); + } + + // ----------------------------------------------------------------------------- + // AbslHashValue for Unordered Associative Containers + // ----------------------------------------------------------------------------- + + // AbslHashValue for hashing std::unordered_set + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const std::unordered_set& s + ) + { + return H::combine( + H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size() + ); + } + + // AbslHashValue for hashing std::unordered_multiset + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, + const std::unordered_multiset& s + ) + { + return H::combine( + H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size() + ); + } + + // AbslHashValue for hashing std::unordered_set + template + typename std::enable_if::value && is_hashable::value, H>::type + AbslHashValue(H hash_state, const std::unordered_map& s) + { + return H::combine( + H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size() + ); + } + + // AbslHashValue for hashing std::unordered_multiset + template + typename std::enable_if::value && is_hashable::value, H>::type + AbslHashValue(H hash_state, const std::unordered_multimap& s) + { + return H::combine( 
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()), + s.size() + ); + } + + // ----------------------------------------------------------------------------- + // AbslHashValue for Wrapper Types + // ----------------------------------------------------------------------------- + + // AbslHashValue for hashing std::reference_wrapper + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, std::reference_wrapper opt + ) + { + return H::combine(std::move(hash_state), opt.get()); + } + + // AbslHashValue for hashing absl::optional + template + typename std::enable_if::value, H>::type AbslHashValue( + H hash_state, const absl::optional& opt + ) + { + if (opt) + hash_state = H::combine(std::move(hash_state), *opt); + return H::combine(std::move(hash_state), opt.has_value()); + } + + // VariantVisitor + template + struct VariantVisitor + { + H&& hash_state; + template + H operator()(const T& t) const + { + return H::combine(std::move(hash_state), t); + } + }; + + // AbslHashValue for hashing absl::variant + template + typename std::enable_if...>::value, H>::type + AbslHashValue(H hash_state, const absl::variant& v) + { + if (!v.valueless_by_exception()) + { + hash_state = absl::visit(VariantVisitor{std::move(hash_state)}, v); + } + return H::combine(std::move(hash_state), v.index()); + } + + // ----------------------------------------------------------------------------- + // AbslHashValue for Other Types + // ----------------------------------------------------------------------------- + + // AbslHashValue for hashing std::bitset is not defined on Little Endian + // platforms, for the same reason as for vector (see std::vector above): + // It does not expose the raw bytes, and a fallback to std::hash<> is most + // likely faster. 
+ +#if defined(ABSL_IS_BIG_ENDIAN) && \ + (defined(__GLIBCXX__) || defined(__GLIBCPP__)) + // AbslHashValue for hashing std::bitset + // + // std::hash in libstdc++ does not work correctly with std::bitset on Big Endian + // platforms therefore we need to implement a custom AbslHashValue for it. More + // details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531 + template + H AbslHashValue(H hash_state, const std::bitset& set) + { + typename H::AbslInternalPiecewiseCombiner combiner; + for (int i = 0; i < N; i++) + { + unsigned char c = static_cast(set[i]); + hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c)); + } + return H::combine(combiner.finalize(std::move(hash_state)), N); + } +#endif + + // ----------------------------------------------------------------------------- + + // hash_range_or_bytes() + // + // Mixes all values in the range [data, data+size) into the hash state. + // This overload accepts only uniquely-represented types, and hashes them by + // hashing the entire range of bytes. + template + typename std::enable_if::value, H>::type + hash_range_or_bytes(H hash_state, const T* data, size_t size) + { + const auto* bytes = reinterpret_cast(data); + return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size); + } + + // hash_range_or_bytes() + template + typename std::enable_if::value, H>::type + hash_range_or_bytes(H hash_state, const T* data, size_t size) + { + for (const auto end = data + size; data < end; ++data) + { + hash_state = H::combine(std::move(hash_state), *data); + } + return hash_state; + } + +#if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \ + ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ +#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1 +#else +#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0 +#endif + + // HashSelect + // + // Type trait to select the appropriate hash implementation to use. 
+ // HashSelect::type will give the proper hash implementation, to be invoked + // as: + // HashSelect::type::Invoke(state, value) + // Also, HashSelect::type::value is a boolean equal to `true` if there is a + // valid `Invoke` function. Types that are not hashable will have a ::value of + // `false`. + struct HashSelect + { + private: + struct State : HashStateBase + { + static State combine_contiguous(State hash_state, const unsigned char*, size_t); + using State::HashStateBase::combine_contiguous; + }; + + struct UniquelyRepresentedProbe + { + template + static auto Invoke(H state, const T& value) + -> absl::enable_if_t::value, H> + { + return hash_internal::hash_bytes(std::move(state), value); + } + }; + + struct HashValueProbe + { + template + static auto Invoke(H state, const T& value) -> absl::enable_if_t< + std::is_same::value, + H> + { + return AbslHashValue(std::move(state), value); + } + }; + + struct LegacyHashProbe + { +#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ + template + static auto Invoke(H state, const T& value) -> absl::enable_if_t< + std::is_convertible< + decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash()(value)), + size_t>::value, + H> + { + return hash_internal::hash_bytes( + std::move(state), + ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash{}(value) + ); + } +#endif // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ + }; + + struct StdHashProbe + { + template + static auto Invoke(H state, const T& value) + -> absl::enable_if_t::value, H> + { + return hash_internal::hash_bytes(std::move(state), std::hash{}(value)); + } + }; + + template + struct Probe : Hash + { + private: + template(), std::declval()))> + static std::true_type Test(int); + template + static std::false_type Test(char); + + public: + static constexpr bool value = decltype(Test(0))::value; + }; + + public: + // Probe each implementation in order. + // disjunction provides short circuiting wrt instantiation. 
+ template + using Apply = absl::disjunction< // + Probe, // + Probe, // + Probe, // + Probe, // + std::false_type>; + }; + + template + struct is_hashable : std::integral_constant::value> + { + }; + + // MixingHashState + class ABSL_DLL MixingHashState : public HashStateBase + { + // absl::uint128 is not an alias or a thin wrapper around the intrinsic. + // We use the intrinsic when available to improve performance. +#ifdef ABSL_HAVE_INTRINSIC_INT128 + using uint128 = __uint128_t; +#else // ABSL_HAVE_INTRINSIC_INT128 + using uint128 = absl::uint128; +#endif // ABSL_HAVE_INTRINSIC_INT128 + + static constexpr uint64_t kMul = + sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51} : uint64_t{0x9ddfea08eb382d69}; + + template + using IntegralFastPath = + conjunction, is_uniquely_represented>; + + public: + // Move only + MixingHashState(MixingHashState&&) = default; + MixingHashState& operator=(MixingHashState&&) = default; + + // MixingHashState::combine_contiguous() + // + // Fundamental base case for hash recursion: mixes the given range of bytes + // into the hash state. + static MixingHashState combine_contiguous(MixingHashState hash_state, const unsigned char* first, size_t size) + { + return MixingHashState( + CombineContiguousImpl(hash_state.state_, first, size, std::integral_constant{}) + ); + } + using MixingHashState::HashStateBase::combine_contiguous; + + // MixingHashState::hash() + // + // For performance reasons in non-opt mode, we specialize this for + // integral types. + // Otherwise we would be instantiating and calling dozens of functions for + // something that is just one multiplication and a couple xor's. + // The result should be the same as running the whole algorithm, but faster. 
+ template::value, int> = 0> + static size_t hash(T value) + { + return static_cast( + Mix(Seed(), static_cast>(value)) + ); + } + + // Overload of MixingHashState::hash() + template::value, int> = 0> + static size_t hash(const T& value) + { + return static_cast(combine(MixingHashState{}, value).state_); + } + + private: + // Invoked only once for a given argument; that plus the fact that this is + // move-only ensures that there is only one non-moved-from object. + MixingHashState() : + state_(Seed()) + { + } + + friend class MixingHashState::HashStateBase; + + template + static MixingHashState RunCombineUnordered(MixingHashState state, CombinerT combiner) + { + uint64_t unordered_state = 0; + combiner(MixingHashState{}, [&](MixingHashState& inner_state) + { + // Add the hash state of the element to the running total, but mix the + // carry bit back into the low bit. This in intended to avoid losing + // entropy to overflow, especially when unordered_multisets contain + // multiple copies of the same value. + auto element_state = inner_state.state_; + unordered_state += element_state; + if (unordered_state < element_state) { + ++unordered_state; + } + inner_state = MixingHashState{}; }); + return MixingHashState::combine(std::move(state), unordered_state); + } + + // Allow the HashState type-erasure implementation to invoke + // RunCombinedUnordered() directly. + friend class absl::HashState; + + // Workaround for MSVC bug. + // We make the type copyable to fix the calling convention, even though we + // never actually copy it. Keep it private to not affect the public API of the + // type. + MixingHashState(const MixingHashState&) = default; + + explicit MixingHashState(uint64_t state) : + state_(state) + { + } + + // Implementation of the base case for combine_contiguous where we actually + // mix the bytes into the state. + // Dispatch to different implementations of the combine_contiguous depending + // on the value of `sizeof(size_t)`. 
+ static uint64_t CombineContiguousImpl(uint64_t state, const unsigned char* first, size_t len, std::integral_constant + /* sizeof_size_t */); + static uint64_t CombineContiguousImpl(uint64_t state, const unsigned char* first, size_t len, std::integral_constant + /* sizeof_size_t */); + + // Slow dispatch path for calls to CombineContiguousImpl with a size argument + // larger than PiecewiseChunkSize(). Has the same effect as calling + // CombineContiguousImpl() repeatedly with the chunk stride size. + static uint64_t CombineLargeContiguousImpl32(uint64_t state, const unsigned char* first, size_t len); + static uint64_t CombineLargeContiguousImpl64(uint64_t state, const unsigned char* first, size_t len); + + // Reads 9 to 16 bytes from p. + // The least significant 8 bytes are in .first, the rest (zero padded) bytes + // are in .second. + static std::pair Read9To16(const unsigned char* p, size_t len) + { + uint64_t low_mem = absl::base_internal::UnalignedLoad64(p); + uint64_t high_mem = absl::base_internal::UnalignedLoad64(p + len - 8); +#ifdef ABSL_IS_LITTLE_ENDIAN + uint64_t most_significant = high_mem; + uint64_t least_significant = low_mem; +#else + uint64_t most_significant = low_mem; + uint64_t least_significant = high_mem; +#endif + return {least_significant, most_significant}; + } + + // Reads 4 to 8 bytes from p. Zero pads to fill uint64_t. + static uint64_t Read4To8(const unsigned char* p, size_t len) + { + uint32_t low_mem = absl::base_internal::UnalignedLoad32(p); + uint32_t high_mem = absl::base_internal::UnalignedLoad32(p + len - 4); +#ifdef ABSL_IS_LITTLE_ENDIAN + uint32_t most_significant = high_mem; + uint32_t least_significant = low_mem; +#else + uint32_t most_significant = low_mem; + uint32_t least_significant = high_mem; +#endif + return (static_cast(most_significant) << (len - 4) * 8) | + least_significant; + } + + // Reads 1 to 3 bytes from p. Zero pads to fill uint32_t. 
+ static uint32_t Read1To3(const unsigned char* p, size_t len) + { + // The trick used by this implementation is to avoid branches if possible. + unsigned char mem0 = p[0]; + unsigned char mem1 = p[len / 2]; + unsigned char mem2 = p[len - 1]; +#ifdef ABSL_IS_LITTLE_ENDIAN + unsigned char significant2 = mem2; + unsigned char significant1 = mem1; + unsigned char significant0 = mem0; +#else + unsigned char significant2 = mem0; + unsigned char significant1 = len == 2 ? mem0 : mem1; + unsigned char significant0 = mem2; +#endif + return static_cast(significant0 | // + (significant1 << (len / 2 * 8)) | // + (significant2 << ((len - 1) * 8))); + } + + ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) + { + // Though the 128-bit product on AArch64 needs two instructions, it is + // still a good balance between speed and hash quality. + using MultType = + absl::conditional_t; + // We do the addition in 64-bit space to make sure the 128-bit + // multiplication is fast. If we were to do it as MultType the compiler has + // to assume that the high word is non-zero and needs to perform 2 + // multiplications instead of one. + MultType m = state + v; + m *= kMul; + return static_cast(m ^ (m >> (sizeof(m) * 8 / 2))); + } + + // An extern to avoid bloat on a direct call to LowLevelHash() with fixed + // values for both the seed and salt parameters. + static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len); + + ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data, size_t len) + { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return LowLevelHashImpl(data, len); +#else + return hash_internal::CityHash64(reinterpret_cast(data), len); +#endif + } + + // Seed() + // + // A non-deterministic seed. + // + // The current purpose of this seed is to generate non-deterministic results + // and prevent having users depend on the particular hash values. 
+ // It is not meant as a security feature right now, but it leaves the door + // open to upgrade it to a true per-process random seed. A true random seed + // costs more and we don't need to pay for that right now. + // + // On platforms with ASLR, we take advantage of it to make a per-process + // random value. + // See https://en.wikipedia.org/wiki/Address_space_layout_randomization + // + // On other platforms this is still going to be non-deterministic but most + // probably per-build and not per-process. + ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() + { +#if (!defined(__clang__) || __clang_major__ > 11) && \ + (!defined(__apple_build_version__) || \ + __apple_build_version__ >= 19558921) // Xcode 12 + return static_cast(reinterpret_cast(&kSeed)); +#else + // Workaround the absence of + // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021. + return static_cast(reinterpret_cast(kSeed)); +#endif + } + static const void* const kSeed; + + uint64_t state_; + }; + + // MixingHashState::CombineContiguousImpl() + inline uint64_t MixingHashState::CombineContiguousImpl( + uint64_t state, const unsigned char* first, size_t len, std::integral_constant /* sizeof_size_t */ + ) + { + // For large values we use CityHash, for small ones we just use a + // multiplicative hash. + uint64_t v; + if (len > 8) + { + if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) + { + return CombineLargeContiguousImpl32(state, first, len); + } + v = hash_internal::CityHash32(reinterpret_cast(first), len); + } + else if (len >= 4) + { + v = Read4To8(first, len); + } + else if (len > 0) + { + v = Read1To3(first, len); + } + else + { + // Empty ranges have no effect. 
+ return state; + } + return Mix(state, v); + } + + // Overload of MixingHashState::CombineContiguousImpl() + inline uint64_t MixingHashState::CombineContiguousImpl( + uint64_t state, const unsigned char* first, size_t len, std::integral_constant /* sizeof_size_t */ + ) + { + // For large values we use LowLevelHash or CityHash depending on the platform, + // for small ones we just use a multiplicative hash. + uint64_t v; + if (len > 16) + { + if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) + { + return CombineLargeContiguousImpl64(state, first, len); + } + v = Hash64(first, len); + } + else if (len > 8) + { + // This hash function was constructed by the ML-driven algorithm discovery + // using reinforcement learning. We fed the agent lots of inputs from + // microbenchmarks, SMHasher, low hamming distance from generated inputs and + // picked up the one that was good on micro and macrobenchmarks. + auto p = Read9To16(first, len); + uint64_t lo = p.first; + uint64_t hi = p.second; + // Rotation by 53 was found to be most often useful when discovering these + // hashing algorithms with ML techniques. + lo = absl::rotr(lo, 53); + state += kMul; + lo += state; + state ^= hi; + uint128 m = state; + m *= lo; + return static_cast(m ^ (m >> 64)); + } + else if (len >= 4) + { + v = Read4To8(first, len); + } + else if (len > 0) + { + v = Read1To3(first, len); + } + else + { + // Empty ranges have no effect. + return state; + } + return Mix(state, v); + } + + struct AggregateBarrier + { + }; + + // HashImpl + + // Add a private base class to make sure this type is not an aggregate. + // Aggregates can be aggregate initialized even if the default constructor is + // deleted. 
+ struct PoisonedHash : private AggregateBarrier + { + PoisonedHash() = delete; + PoisonedHash(const PoisonedHash&) = delete; + PoisonedHash& operator=(const PoisonedHash&) = delete; + }; + + template + struct HashImpl + { + size_t operator()(const T& value) const + { + return MixingHashState::hash(value); + } + }; + + template + struct Hash : absl::conditional_t::value, HashImpl, PoisonedHash> + { + }; + + template + template + H HashStateBase::combine(H state, const T& value, const Ts&... values) + { + return H::combine(hash_internal::HashSelect::template Apply::Invoke(std::move(state), value), values...); + } + + // HashStateBase::combine_contiguous() + template + template + H HashStateBase::combine_contiguous(H state, const T* data, size_t size) + { + return hash_internal::hash_range_or_bytes(std::move(state), data, size); + } + + // HashStateBase::combine_unordered() + template + template + H HashStateBase::combine_unordered(H state, I begin, I end) + { + return H::RunCombineUnordered(std::move(state), CombineUnorderedCallback{begin, end}); + } + + // HashStateBase::PiecewiseCombiner::add_buffer() + template + H PiecewiseCombiner::add_buffer(H state, const unsigned char* data, size_t size) + { + if (position_ + size < PiecewiseChunkSize()) + { + // This partial chunk does not fill our existing buffer + memcpy(buf_ + position_, data, size); + position_ += size; + return state; + } + + // If the buffer is partially filled we need to complete the buffer + // and hash it. 
+ if (position_ != 0) + { + const size_t bytes_needed = PiecewiseChunkSize() - position_; + memcpy(buf_ + position_, data, bytes_needed); + state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize()); + data += bytes_needed; + size -= bytes_needed; + } + + // Hash whatever chunks we can without copying + while (size >= PiecewiseChunkSize()) + { + state = H::combine_contiguous(std::move(state), data, PiecewiseChunkSize()); + data += PiecewiseChunkSize(); + size -= PiecewiseChunkSize(); + } + // Fill the buffer with the remainder + memcpy(buf_, data, size); + position_ = size; + return state; + } + + // HashStateBase::PiecewiseCombiner::finalize() + template + H PiecewiseCombiner::finalize(H state) + { + // Hash the remainder left in the buffer, which may be empty + return H::combine_contiguous(std::move(state), buf_, position_); + } + + } // namespace hash_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_INTERNAL_HASH_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/internal/hash_test.h b/CAPI/cpp/grpc/include/absl/hash/internal/hash_test.h new file mode 100644 index 00000000..693ab40a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/internal/hash_test.h @@ -0,0 +1,109 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Common code shared between absl/hash/hash_test.cc and +// absl/hash/hash_instantiated_test.cc. 
+ +#ifndef ABSL_HASH_INTERNAL_HASH_TEST_H_ +#define ABSL_HASH_INTERNAL_HASH_TEST_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/hash/hash.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace hash_test_internal + { + + // Utility wrapper of T for the purposes of testing the `AbslHash` type erasure + // mechanism. `TypeErasedValue` can be constructed with a `T`, and can + // be compared and hashed. However, all hashing goes through the hashing + // type-erasure framework. + template + class TypeErasedValue + { + public: + TypeErasedValue() = default; + TypeErasedValue(const TypeErasedValue&) = default; + TypeErasedValue(TypeErasedValue&&) = default; + explicit TypeErasedValue(const T& n) : + n_(n) + { + } + + template + friend H AbslHashValue(H hash_state, const TypeErasedValue& v) + { + v.HashValue(absl::HashState::Create(&hash_state)); + return hash_state; + } + + void HashValue(absl::HashState state) const + { + absl::HashState::combine(std::move(state), n_); + } + + bool operator==(const TypeErasedValue& rhs) const + { + return n_ == rhs.n_; + } + bool operator!=(const TypeErasedValue& rhs) const + { + return !(*this == rhs); + } + + private: + T n_; + }; + + // A TypeErasedValue refinement, for containers. It exposes the wrapped + // `value_type` and is constructible from an initializer list. 
+ template + class TypeErasedContainer : public TypeErasedValue + { + public: + using value_type = typename T::value_type; + TypeErasedContainer() = default; + TypeErasedContainer(const TypeErasedContainer&) = default; + TypeErasedContainer(TypeErasedContainer&&) = default; + explicit TypeErasedContainer(const T& n) : + TypeErasedValue(n) + { + } + TypeErasedContainer(std::initializer_list init_list) : + TypeErasedContainer(T(init_list.begin(), init_list.end())) + { + } + // one-argument constructor of value type T, to appease older toolchains that + // get confused by one-element initializer lists in some contexts + explicit TypeErasedContainer(const value_type& v) : + TypeErasedContainer(T(&v, &v + 1)) + { + } + }; + + // Helper trait to verify if T is hashable. We use absl::Hash's poison status to + // detect it. + template + using is_hashable = std::is_default_constructible>; + + } // namespace hash_test_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_INTERNAL_HASH_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/internal/low_level_hash.h b/CAPI/cpp/grpc/include/absl/hash/internal/low_level_hash.h new file mode 100644 index 00000000..025c4777 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/internal/low_level_hash.h @@ -0,0 +1,51 @@ +// Copyright 2020 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file provides the Google-internal implementation of LowLevelHash. 
+// +// LowLevelHash is a fast hash function for hash tables, the fastest we've +// currently (late 2020) found that passes the SMHasher tests. The algorithm +// relies on intrinsic 128-bit multiplication for speed. This is not meant to be +// secure - just fast. +// +// It is closely based on a version of wyhash, but does not maintain or +// guarantee future compatibility with it. + +#ifndef ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_ +#define ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_ + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace hash_internal + { + + // Hash function for a byte array. A 64-bit seed and a set of five 64-bit + // integers are hashed into the result. + // + // To allow all hashable types (including string_view and Span) to depend on + // this algorithm, we keep the API low-level, with as few dependencies as + // possible. + uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed, const uint64_t salt[5]); + + } // namespace hash_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/internal/spy_hash_state.h b/CAPI/cpp/grpc/include/absl/hash/internal/spy_hash_state.h new file mode 100644 index 00000000..bb95e366 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/internal/spy_hash_state.h @@ -0,0 +1,299 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_ +#define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_ + +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/match.h" +#include "absl/strings/str_format.h" +#include "absl/strings/str_join.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace hash_internal + { + + // SpyHashState is an implementation of the HashState API that simply + // accumulates all input bytes in an internal buffer. This makes it useful + // for testing AbslHashValue overloads (so long as they are templated on the + // HashState parameter), since it can report the exact hash representation + // that the AbslHashValue overload produces. + // + // Sample usage: + // EXPECT_EQ(SpyHashState::combine(SpyHashState(), foo), + // SpyHashState::combine(SpyHashState(), bar)); + template + class SpyHashStateImpl : public HashStateBase> + { + public: + SpyHashStateImpl() : + error_(std::make_shared>()) + { + static_assert(std::is_void::value, ""); + } + + // Move-only + SpyHashStateImpl(const SpyHashStateImpl&) = delete; + SpyHashStateImpl& operator=(const SpyHashStateImpl&) = delete; + + SpyHashStateImpl(SpyHashStateImpl&& other) noexcept + { + *this = std::move(other); + } + + SpyHashStateImpl& operator=(SpyHashStateImpl&& other) noexcept + { + hash_representation_ = std::move(other.hash_representation_); + error_ = other.error_; + moved_from_ = other.moved_from_; + other.moved_from_ = true; + return *this; + } + + template + SpyHashStateImpl(SpyHashStateImpl&& other) + { // NOLINT + hash_representation_ = std::move(other.hash_representation_); + error_ = other.error_; + moved_from_ = other.moved_from_; + other.moved_from_ = true; + } + + template + static SpyHashStateImpl combine(SpyHashStateImpl s, const A& a, const Args&... args) + { + // Pass an instance of SpyHashStateImpl when trying to combine `A`. 
This + // allows us to test that the user only uses this instance for combine calls + // and does not call AbslHashValue directly. + // See AbslHashValue implementation at the bottom. + s = SpyHashStateImpl::HashStateBase::combine(std::move(s), a); + return SpyHashStateImpl::combine(std::move(s), args...); + } + static SpyHashStateImpl combine(SpyHashStateImpl s) + { + if (direct_absl_hash_value_error_) + { + *s.error_ = "AbslHashValue should not be invoked directly."; + } + else if (s.moved_from_) + { + *s.error_ = "Used moved-from instance of the hash state object."; + } + return s; + } + + static void SetDirectAbslHashValueError() + { + direct_absl_hash_value_error_ = true; + } + + // Two SpyHashStateImpl objects are equal if they hold equal hash + // representations. + friend bool operator==(const SpyHashStateImpl& lhs, const SpyHashStateImpl& rhs) + { + return lhs.hash_representation_ == rhs.hash_representation_; + } + + friend bool operator!=(const SpyHashStateImpl& lhs, const SpyHashStateImpl& rhs) + { + return !(lhs == rhs); + } + + enum class CompareResult + { + kEqual, + kASuffixB, + kBSuffixA, + kUnequal, + }; + + static CompareResult Compare(const SpyHashStateImpl& a, const SpyHashStateImpl& b) + { + const std::string a_flat = absl::StrJoin(a.hash_representation_, ""); + const std::string b_flat = absl::StrJoin(b.hash_representation_, ""); + if (a_flat == b_flat) + return CompareResult::kEqual; + if (absl::EndsWith(a_flat, b_flat)) + return CompareResult::kBSuffixA; + if (absl::EndsWith(b_flat, a_flat)) + return CompareResult::kASuffixB; + return CompareResult::kUnequal; + } + + // operator<< prints the hash representation as a hex and ASCII dump, to + // facilitate debugging. 
+ friend std::ostream& operator<<(std::ostream& out, const SpyHashStateImpl& hash_state) + { + out << "[\n"; + for (auto& s : hash_state.hash_representation_) + { + size_t offset = 0; + for (char c : s) + { + if (offset % 16 == 0) + { + out << absl::StreamFormat("\n0x%04x: ", offset); + } + if (offset % 2 == 0) + { + out << " "; + } + out << absl::StreamFormat("%02x", c); + ++offset; + } + out << "\n"; + } + return out << "]"; + } + + // The base case of the combine recursion, which writes raw bytes into the + // internal buffer. + static SpyHashStateImpl combine_contiguous(SpyHashStateImpl hash_state, const unsigned char* begin, size_t size) + { + const size_t large_chunk_stride = PiecewiseChunkSize(); + if (size > large_chunk_stride) + { + // Combining a large contiguous buffer must have the same effect as + // doing it piecewise by the stride length, followed by the (possibly + // empty) remainder. + while (size >= large_chunk_stride) + { + hash_state = SpyHashStateImpl::combine_contiguous( + std::move(hash_state), begin, large_chunk_stride + ); + begin += large_chunk_stride; + size -= large_chunk_stride; + } + } + + hash_state.hash_representation_.emplace_back( + reinterpret_cast(begin), size + ); + return hash_state; + } + + using SpyHashStateImpl::HashStateBase::combine_contiguous; + + template + static SpyHashStateImpl RunCombineUnordered(SpyHashStateImpl state, CombinerT combiner) + { + UnorderedCombinerCallback cb; + + combiner(SpyHashStateImpl{}, std::ref(cb)); + + std::sort(cb.element_hash_representations.begin(), cb.element_hash_representations.end()); + state.hash_representation_.insert(state.hash_representation_.end(), cb.element_hash_representations.begin(), cb.element_hash_representations.end()); + if (cb.error && cb.error->has_value()) + { + state.error_ = std::move(cb.error); + } + return state; + } + + absl::optional error() const + { + if (moved_from_) + { + return "Returned a moved-from instance of the hash state object."; + } + return *error_; 
+ } + + private: + template + friend class SpyHashStateImpl; + + struct UnorderedCombinerCallback + { + std::vector element_hash_representations; + std::shared_ptr> error; + + // The inner spy can have a different type. + template + void operator()(SpyHashStateImpl& inner) + { + element_hash_representations.push_back( + absl::StrJoin(inner.hash_representation_, "") + ); + if (inner.error_->has_value()) + { + error = std::move(inner.error_); + } + inner = SpyHashStateImpl{}; + } + }; + + // This is true if SpyHashStateImpl has been passed to a call of + // AbslHashValue with the wrong type. This detects that the user called + // AbslHashValue directly (because the hash state type does not match). + static bool direct_absl_hash_value_error_; + + std::vector hash_representation_; + // This is a shared_ptr because we want all instances of the particular + // SpyHashState run to share the field. This way we can set the error for + // use-after-move and all the copies will see it. + std::shared_ptr> error_; + bool moved_from_ = false; + }; + + template + bool SpyHashStateImpl::direct_absl_hash_value_error_; + + template + struct OdrUse + { + constexpr OdrUse() + { + } + bool& b = B; + }; + + template + struct RunOnStartup + { + static bool run; + static constexpr OdrUse kOdrUse{}; + }; + + template + bool RunOnStartup::run = (f(), true); + + template< + typename T, + typename U, + // Only trigger for when (T != U), + typename = absl::enable_if_t::value>, + // This statement works in two ways: + // - First, it instantiates RunOnStartup and forces the initialization of + // `run`, which set the global variable. + // - Second, it triggers a SFINAE error disabling the overload to prevent + // compile time errors. If we didn't disable the overload we would get + // ambiguous overload errors, which we don't want. 
+ int = RunOnStartup::SetDirectAbslHashValueError>::run> + void AbslHashValue(SpyHashStateImpl, const U&); + + using SpyHashState = SpyHashStateImpl; + + } // namespace hash_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_ diff --git a/CAPI/cpp/grpc/include/absl/memory/memory.h b/CAPI/cpp/grpc/include/absl/memory/memory.h new file mode 100644 index 00000000..1b84ecd0 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/memory/memory.h @@ -0,0 +1,301 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: memory.h +// ----------------------------------------------------------------------------- +// +// This header file contains utility functions for managing the creation and +// conversion of smart pointers. This file is an extension to the C++ +// standard library header file. 
+ +#ifndef ABSL_MEMORY_MEMORY_H_ +#define ABSL_MEMORY_MEMORY_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // Function Template: WrapUnique() + // ----------------------------------------------------------------------------- + // + // Adopts ownership from a raw pointer and transfers it to the returned + // `std::unique_ptr`, whose type is deduced. Because of this deduction, *do not* + // specify the template type `T` when calling `WrapUnique`. + // + // Example: + // X* NewX(int, int); + // auto x = WrapUnique(NewX(1, 2)); // 'x' is std::unique_ptr. + // + // Do not call WrapUnique with an explicit type, as in + // `WrapUnique(NewX(1, 2))`. The purpose of WrapUnique is to automatically + // deduce the pointer type. If you wish to make the type explicit, just use + // `std::unique_ptr` directly. + // + // auto x = std::unique_ptr(NewX(1, 2)); + // - or - + // std::unique_ptr x(NewX(1, 2)); + // + // While `absl::WrapUnique` is useful for capturing the output of a raw + // pointer factory, prefer 'absl::make_unique(args...)' over + // 'absl::WrapUnique(new T(args...))'. + // + // auto x = WrapUnique(new X(1, 2)); // works, but nonideal. + // auto x = make_unique(1, 2); // safer, standard, avoids raw 'new'. + // + // Note that `absl::WrapUnique(p)` is valid only if `delete p` is a valid + // expression. In particular, `absl::WrapUnique()` cannot wrap pointers to + // arrays, functions or void, and it must not be used to capture pointers + // obtained from array-new expressions (even though that would compile!). 
+ template + std::unique_ptr WrapUnique(T* ptr) + { + static_assert(!std::is_array::value, "array types are unsupported"); + static_assert(std::is_object::value, "non-object types are unsupported"); + return std::unique_ptr(ptr); + } + + // ----------------------------------------------------------------------------- + // Function Template: make_unique() + // ----------------------------------------------------------------------------- + // + // Creates a `std::unique_ptr<>`, while avoiding issues creating temporaries + // during the construction process. `absl::make_unique<>` also avoids redundant + // type declarations, by avoiding the need to explicitly use the `new` operator. + // + // https://en.cppreference.com/w/cpp/memory/unique_ptr/make_unique + // + // For more background on why `std::unique_ptr(new T(a,b))` is problematic, + // see Herb Sutter's explanation on + // (Exception-Safe Function Calls)[https://herbsutter.com/gotw/_102/]. + // (In general, reviewers should treat `new T(a,b)` with scrutiny.) + // + // Historical note: Abseil once provided a C++11 compatible implementation of + // the C++14's `std::make_unique`. Now that C++11 support has been sunsetted, + // `absl::make_unique` simply uses the STL-provided implementation. New code + // should use `std::make_unique`. + using std::make_unique; + + // ----------------------------------------------------------------------------- + // Function Template: RawPtr() + // ----------------------------------------------------------------------------- + // + // Extracts the raw pointer from a pointer-like value `ptr`. `absl::RawPtr` is + // useful within templates that need to handle a complement of raw pointers, + // `std::nullptr_t`, and smart pointers. + template + auto RawPtr(T&& ptr) -> decltype(std::addressof(*ptr)) + { + // ptr is a forwarding reference to support Ts with non-const operators. + return (ptr != nullptr) ? 
std::addressof(*ptr) : nullptr; + } + inline std::nullptr_t RawPtr(std::nullptr_t) + { + return nullptr; + } + + // ----------------------------------------------------------------------------- + // Function Template: ShareUniquePtr() + // ----------------------------------------------------------------------------- + // + // Adopts a `std::unique_ptr` rvalue and returns a `std::shared_ptr` of deduced + // type. Ownership (if any) of the held value is transferred to the returned + // shared pointer. + // + // Example: + // + // auto up = absl::make_unique(10); + // auto sp = absl::ShareUniquePtr(std::move(up)); // shared_ptr + // CHECK_EQ(*sp, 10); + // CHECK(up == nullptr); + // + // Note that this conversion is correct even when T is an array type, and more + // generally it works for *any* deleter of the `unique_ptr` (single-object + // deleter, array deleter, or any custom deleter), since the deleter is adopted + // by the shared pointer as well. The deleter is copied (unless it is a + // reference). + // + // Implements the resolution of [LWG 2415](http://wg21.link/lwg2415), by which a + // null shared pointer does not attempt to call the deleter. + template + std::shared_ptr ShareUniquePtr(std::unique_ptr&& ptr) + { + return ptr ? std::shared_ptr(std::move(ptr)) : std::shared_ptr(); + } + + // ----------------------------------------------------------------------------- + // Function Template: WeakenPtr() + // ----------------------------------------------------------------------------- + // + // Creates a weak pointer associated with a given shared pointer. The returned + // value is a `std::weak_ptr` of deduced type. 
+ // + // Example: + // + // auto sp = std::make_shared(10); + // auto wp = absl::WeakenPtr(sp); + // CHECK_EQ(sp.get(), wp.lock().get()); + // sp.reset(); + // CHECK(wp.lock() == nullptr); + // + template + std::weak_ptr WeakenPtr(const std::shared_ptr& ptr) + { + return std::weak_ptr(ptr); + } + + // ----------------------------------------------------------------------------- + // Class Template: pointer_traits + // ----------------------------------------------------------------------------- + // + // Historical note: Abseil once provided an implementation of + // `std::pointer_traits` for platforms that had not yet provided it. Those + // platforms are no longer supported. New code should simply use + // `std::pointer_traits`. + using std::pointer_traits; + + // ----------------------------------------------------------------------------- + // Class Template: allocator_traits + // ----------------------------------------------------------------------------- + // + // Historical note: Abseil once provided an implementation of + // `std::allocator_traits` for platforms that had not yet provided it. Those + // platforms are no longer supported. New code should simply use + // `std::allocator_traits`. + using std::allocator_traits; + + namespace memory_internal + { + + // ExtractOr::type evaluates to E if possible. Otherwise, D. + template class Extract, typename Obj, typename Default, typename> + struct ExtractOr + { + using type = Default; + }; + + template class Extract, typename Obj, typename Default> + struct ExtractOr>> + { + using type = Extract; + }; + + template class Extract, typename Obj, typename Default> + using ExtractOrT = typename ExtractOr::type; + + // This template alias transforms Alloc::is_nothrow into a metafunction with + // Alloc as a parameter so it can be used with ExtractOrT<>. 
+ template + using GetIsNothrow = typename Alloc::is_nothrow; + + } // namespace memory_internal + + // ABSL_ALLOCATOR_NOTHROW is a build time configuration macro for user to + // specify whether the default allocation function can throw or never throws. + // If the allocation function never throws, user should define it to a non-zero + // value (e.g. via `-DABSL_ALLOCATOR_NOTHROW`). + // If the allocation function can throw, user should leave it undefined or + // define it to zero. + // + // allocator_is_nothrow is a traits class that derives from + // Alloc::is_nothrow if present, otherwise std::false_type. It's specialized + // for Alloc = std::allocator for any type T according to the state of + // ABSL_ALLOCATOR_NOTHROW. + // + // default_allocator_is_nothrow is a class that derives from std::true_type + // when the default allocator (global operator new) never throws, and + // std::false_type when it can throw. It is a convenience shorthand for writing + // allocator_is_nothrow> (T can be any type). + // NOTE: allocator_is_nothrow> is guaranteed to derive from + // the same type for all T, because users should specialize neither + // allocator_is_nothrow nor std::allocator. + template + struct allocator_is_nothrow : memory_internal::ExtractOrT + { + }; + +#if defined(ABSL_ALLOCATOR_NOTHROW) && ABSL_ALLOCATOR_NOTHROW + template + struct allocator_is_nothrow> : std::true_type + { + }; + struct default_allocator_is_nothrow : std::true_type + { + }; +#else + struct default_allocator_is_nothrow : std::false_type + { + }; +#endif + + namespace memory_internal + { + template + void ConstructRange(Allocator& alloc, Iterator first, Iterator last, const Args&... 
args) + { + for (Iterator cur = first; cur != last; ++cur) + { + ABSL_INTERNAL_TRY + { + std::allocator_traits::construct(alloc, std::addressof(*cur), args...); + } + ABSL_INTERNAL_CATCH_ANY + { + while (cur != first) + { + --cur; + std::allocator_traits::destroy(alloc, std::addressof(*cur)); + } + ABSL_INTERNAL_RETHROW; + } + } + } + + template + void CopyRange(Allocator& alloc, Iterator destination, InputIterator first, InputIterator last) + { + for (Iterator cur = destination; first != last; + static_cast(++cur), static_cast(++first)) + { + ABSL_INTERNAL_TRY + { + std::allocator_traits::construct(alloc, std::addressof(*cur), *first); + } + ABSL_INTERNAL_CATCH_ANY + { + while (cur != destination) + { + --cur; + std::allocator_traits::destroy(alloc, std::addressof(*cur)); + } + ABSL_INTERNAL_RETHROW; + } + } + } + } // namespace memory_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_MEMORY_MEMORY_H_ diff --git a/CAPI/cpp/grpc/include/absl/meta/type_traits.h b/CAPI/cpp/grpc/include/absl/meta/type_traits.h new file mode 100644 index 00000000..9c216f0d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/meta/type_traits.h @@ -0,0 +1,612 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// type_traits.h +// ----------------------------------------------------------------------------- +// +// This file contains C++11-compatible versions of standard API +// functions for determining the characteristics of types. Such traits can +// support type inference, classification, and transformation, as well as +// make it easier to write templates based on generic type behavior. +// +// See https://en.cppreference.com/w/cpp/header/type_traits +// +// WARNING: use of many of the constructs in this header will count as "complex +// template metaprogramming", so before proceeding, please carefully consider +// https://google.github.io/styleguide/cppguide.html#Template_metaprogramming +// +// WARNING: using template metaprogramming to detect or depend on API +// features is brittle and not guaranteed. Neither the standard library nor +// Abseil provides any guarantee that APIs are stable in the face of template +// metaprogramming. Use with caution. +#ifndef ABSL_META_TYPE_TRAITS_H_ +#define ABSL_META_TYPE_TRAITS_H_ + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17 +// feature. 
+#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__) +#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT __STDCPP_DEFAULT_NEW_ALIGNMENT__ +#else // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__) +#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT alignof(std::max_align_t) +#endif // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace type_traits_internal + { + + template + struct VoidTImpl + { + using type = void; + }; + + //////////////////////////////// + // Library Fundamentals V2 TS // + //////////////////////////////// + + // NOTE: The `is_detected` family of templates here differ from the library + // fundamentals specification in that for library fundamentals, `Op` is + // evaluated as soon as the type `is_detected` undergoes + // substitution, regardless of whether or not the `::value` is accessed. That + // is inconsistent with all other standard traits and prevents lazy evaluation + // in larger contexts (such as if the `is_detected` check is a trailing argument + // of a `conjunction`. This implementation opts to instead be lazy in the same + // way that the standard traits are (this "defect" of the detection idiom + // specifications has been reported). + + template class Op, class... Args> + struct is_detected_impl + { + using type = std::false_type; + }; + + template class Op, class... Args> + struct is_detected_impl>::type, Op, Args...> + { + using type = std::true_type; + }; + + template class Op, class... Args> + struct is_detected : is_detected_impl::type + { + }; + + template class Op, class... Args> + struct is_detected_convertible_impl + { + using type = std::false_type; + }; + + template class Op, class... Args> + struct is_detected_convertible_impl< + typename std::enable_if, To>::value>::type, + To, + Op, + Args...> + { + using type = std::true_type; + }; + + template class Op, class... 
Args> + struct is_detected_convertible : is_detected_convertible_impl::type + { + }; + + } // namespace type_traits_internal + + // void_t() + // + // Ignores the type of any its arguments and returns `void`. In general, this + // metafunction allows you to create a general case that maps to `void` while + // allowing specializations that map to specific types. + // + // This metafunction is designed to be a drop-in replacement for the C++17 + // `std::void_t` metafunction. + // + // NOTE: `absl::void_t` does not use the standard-specified implementation so + // that it can remain compatible with gcc < 5.1. This can introduce slightly + // different behavior, such as when ordering partial specializations. + template + using void_t = typename type_traits_internal::VoidTImpl::type; + + // conjunction + // + // Performs a compile-time logical AND operation on the passed types (which + // must have `::value` members convertible to `bool`. Short-circuits if it + // encounters any `false` members (and does not compare the `::value` members + // of any remaining arguments). + // + // This metafunction is designed to be a drop-in replacement for the C++17 + // `std::conjunction` metafunction. + template + struct conjunction : std::true_type + { + }; + + template + struct conjunction : std::conditional, T>::type + { + }; + + template + struct conjunction : T + { + }; + + // disjunction + // + // Performs a compile-time logical OR operation on the passed types (which + // must have `::value` members convertible to `bool`. Short-circuits if it + // encounters any `true` members (and does not compare the `::value` members + // of any remaining arguments). + // + // This metafunction is designed to be a drop-in replacement for the C++17 + // `std::disjunction` metafunction. 
+ template + struct disjunction : std::false_type + { + }; + + template + struct disjunction : + std::conditional>::type + { + }; + + template + struct disjunction : T + { + }; + + // negation + // + // Performs a compile-time logical NOT operation on the passed type (which + // must have `::value` members convertible to `bool`. + // + // This metafunction is designed to be a drop-in replacement for the C++17 + // `std::negation` metafunction. + template + struct negation : std::integral_constant + { + }; + + // is_function() + // + // Determines whether the passed type `T` is a function type. + // + // This metafunction is designed to be a drop-in replacement for the C++11 + // `std::is_function()` metafunction for platforms that have incomplete C++11 + // support (such as libstdc++ 4.x). + // + // This metafunction works because appending `const` to a type does nothing to + // function types and reference types (and forms a const-qualified type + // otherwise). + template + struct is_function : std::integral_constant::value || std::is_const::type>::value)> + { + }; + + // is_copy_assignable() + // is_move_assignable() + // is_trivially_destructible() + // is_trivially_default_constructible() + // is_trivially_move_constructible() + // is_trivially_copy_constructible() + // is_trivially_move_assignable() + // is_trivially_copy_assignable() + // + // Historical note: Abseil once provided implementations of these type traits + // for platforms that lacked full support. New code should prefer to use the + // std variants. 
+ // + // See the documentation for the STL header for more information: + // https://en.cppreference.com/w/cpp/header/type_traits + using std::is_copy_assignable; + using std::is_move_assignable; + using std::is_trivially_copy_assignable; + using std::is_trivially_copy_constructible; + using std::is_trivially_default_constructible; + using std::is_trivially_destructible; + using std::is_trivially_move_assignable; + using std::is_trivially_move_constructible; + +#if defined(__cpp_lib_remove_cvref) && __cpp_lib_remove_cvref >= 201711L + template + using remove_cvref = std::remove_cvref; + + template + using remove_cvref_t = typename std::remove_cvref::type; +#else + // remove_cvref() + // + // C++11 compatible implementation of std::remove_cvref which was added in + // C++20. + template + struct remove_cvref + { + using type = + typename std::remove_cv::type>::type; + }; + + template + using remove_cvref_t = typename remove_cvref::type; +#endif + + // ----------------------------------------------------------------------------- + // C++14 "_t" trait aliases + // ----------------------------------------------------------------------------- + + template + using remove_cv_t = typename std::remove_cv::type; + + template + using remove_const_t = typename std::remove_const::type; + + template + using remove_volatile_t = typename std::remove_volatile::type; + + template + using add_cv_t = typename std::add_cv::type; + + template + using add_const_t = typename std::add_const::type; + + template + using add_volatile_t = typename std::add_volatile::type; + + template + using remove_reference_t = typename std::remove_reference::type; + + template + using add_lvalue_reference_t = typename std::add_lvalue_reference::type; + + template + using add_rvalue_reference_t = typename std::add_rvalue_reference::type; + + template + using remove_pointer_t = typename std::remove_pointer::type; + + template + using add_pointer_t = typename std::add_pointer::type; + + template + using 
make_signed_t = typename std::make_signed::type; + + template + using make_unsigned_t = typename std::make_unsigned::type; + + template + using remove_extent_t = typename std::remove_extent::type; + + template + using remove_all_extents_t = typename std::remove_all_extents::type; + + ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING + namespace type_traits_internal + { + // This trick to retrieve a default alignment is necessary for our + // implementation of aligned_storage_t to be consistent with any + // implementation of std::aligned_storage. + template> + struct default_alignment_of_aligned_storage; + + template + struct default_alignment_of_aligned_storage< + Len, + std::aligned_storage> + { + static constexpr size_t value = Align; + }; + } // namespace type_traits_internal + + // TODO(b/260219225): std::aligned_storage(_t) is deprecated in C++23. + template::value> + using aligned_storage_t = typename std::aligned_storage::type; + ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING + + template + using decay_t = typename std::decay::type; + + template + using enable_if_t = typename std::enable_if::type; + + template + using conditional_t = typename std::conditional::type; + + template + using common_type_t = typename std::common_type::type; + + template + using underlying_type_t = typename std::underlying_type::type; + + namespace type_traits_internal + { + +#if (defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L) || \ + (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) + // std::result_of is deprecated (C++17) or removed (C++20) + template + struct result_of; + template + struct result_of : std::invoke_result + { + }; +#else + template + using result_of = std::result_of; +#endif + + } // namespace type_traits_internal + + template + using result_of_t = typename type_traits_internal::result_of::type; + + namespace type_traits_internal + { +// In MSVC we can't probe std::hash or stdext::hash because it triggers a +// static_assert instead 
of failing substitution. Libc++ prior to 4.0 +// also used a static_assert. +// +#if defined(_MSC_VER) || (defined(_LIBCPP_VERSION) && \ + _LIBCPP_VERSION < 4000 && _LIBCPP_STD_VER > 11) +#define ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ 0 +#else +#define ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ 1 +#endif + +#if !ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ + template + struct IsHashable : std::true_type + { + }; +#else // ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ + template + struct IsHashable : std::false_type + { + }; + + template + struct IsHashable< + Key, + absl::enable_if_t&>()(std::declval())), + std::size_t>::value>> : std::true_type + { + }; +#endif // !ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_ + + struct AssertHashEnabledHelper + { + private: + static void Sink(...) + { + } + struct NAT + { + }; + + template + static auto GetReturnType(int) + -> decltype(std::declval>()(std::declval())); + template + static NAT GetReturnType(...); + + template + static std::nullptr_t DoIt() + { + static_assert(IsHashable::value, "std::hash does not provide a call operator"); + static_assert( + std::is_default_constructible>::value, + "std::hash must be default constructible when it is enabled" + ); + static_assert( + std::is_copy_constructible>::value, + "std::hash must be copy constructible when it is enabled" + ); + static_assert(absl::is_copy_assignable>::value, "std::hash must be copy assignable when it is enabled"); + // is_destructible is unchecked as it's implied by each of the + // is_constructible checks. 
+ using ReturnType = decltype(GetReturnType(0)); + static_assert(std::is_same::value || std::is_same::value, "std::hash must return size_t"); + return nullptr; + } + + template + friend void AssertHashEnabled(); + }; + + template + inline void AssertHashEnabled() + { + using Helper = AssertHashEnabledHelper; + Helper::Sink(Helper::DoIt()...); + } + + } // namespace type_traits_internal + + // An internal namespace that is required to implement the C++17 swap traits. + // It is not further nested in type_traits_internal to avoid long symbol names. + namespace swap_internal + { + + // Necessary for the traits. + using std::swap; + + // This declaration prevents global `swap` and `absl::swap` overloads from being + // considered unless ADL picks them up. + void swap(); + + template + using IsSwappableImpl = decltype(swap(std::declval(), std::declval())); + + // NOTE: This dance with the default template parameter is for MSVC. + template(), std::declval()))>> + using IsNothrowSwappableImpl = typename std::enable_if::type; + + // IsSwappable + // + // Determines whether the standard swap idiom is a valid expression for + // arguments of type `T`. + template + struct IsSwappable : absl::type_traits_internal::is_detected + { + }; + + // IsNothrowSwappable + // + // Determines whether the standard swap idiom is a valid expression for + // arguments of type `T` and is noexcept. + template + struct IsNothrowSwappable : absl::type_traits_internal::is_detected + { + }; + + // Swap() + // + // Performs the swap idiom from a namespace where valid candidates may only be + // found in `std` or via ADL. + template::value, int> = 0> + void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable::value) + { + swap(lhs, rhs); + } + + // StdSwapIsUnconstrained + // + // Some standard library implementations are broken in that they do not + // constrain `std::swap`. This will effectively tell us if we are dealing with + // one of those implementations. 
+ using StdSwapIsUnconstrained = IsSwappable; + + } // namespace swap_internal + + namespace type_traits_internal + { + + // Make the swap-related traits/function accessible from this namespace. + using swap_internal::IsNothrowSwappable; + using swap_internal::IsSwappable; + using swap_internal::StdSwapIsUnconstrained; + using swap_internal::Swap; + + } // namespace type_traits_internal + +// absl::is_trivially_relocatable +// +// Detects whether a type is known to be "trivially relocatable" -- meaning it +// can be relocated without invoking the constructor/destructor, using a form of +// move elision. +// +// This trait is conservative, for backwards compatibility. If it's true then +// the type is definitely trivially relocatable, but if it's false then the type +// may or may not be. +// +// Example: +// +// if constexpr (absl::is_trivially_relocatable::value) { +// memcpy(new_location, old_location, sizeof(T)); +// } else { +// new(new_location) T(std::move(*old_location)); +// old_location->~T(); +// } +// +// Upstream documentation: +// +// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable + +// If the compiler offers a builtin that tells us the answer, we can use that. +// This covers all of the cases in the fallback below, plus types that opt in +// using e.g. [[clang::trivial_abi]]. +// +// Clang on Windows has the builtin, but it falsely claims types with a +// user-provided destructor are trivial (http://b/275003464). So we opt out +// there. +// +// TODO(b/275003464): remove the opt-out once the bug is fixed. +// +// According to https://github.com/abseil/abseil-cpp/issues/1479, this does not +// work with NVCC either. 
+#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \ + !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64))) && \ + !defined(__NVCC__) + template + struct is_trivially_relocatable : std::integral_constant + { + }; +#else + // Otherwise we use a fallback that detects only those types we can feasibly + // detect. Any time that has trivial move-construction and destruction + // operations is by definition trivially relocatable. + template + struct is_trivially_relocatable : absl::conjunction, absl::is_trivially_destructible> + { + }; +#endif + +// absl::is_constant_evaluated() +// +// Detects whether the function call occurs within a constant-evaluated context. +// Returns true if the evaluation of the call occurs within the evaluation of an +// expression or conversion that is manifestly constant-evaluated; otherwise +// returns false. +// +// This function is implemented in terms of `std::is_constant_evaluated` for +// c++20 and up. For older c++ versions, the function is implemented in terms +// of `__builtin_is_constant_evaluated` if available, otherwise the function +// will fail to compile. +// +// Applications can inspect `ABSL_HAVE_CONSTANT_EVALUATED` at compile time +// to check if this function is supported. 
+// +// Example: +// +// constexpr MyClass::MyClass(int param) { +// #ifdef ABSL_HAVE_CONSTANT_EVALUATED +// if (!absl::is_constant_evaluated()) { +// ABSL_LOG(INFO) << "MyClass(" << param << ")"; +// } +// #endif // ABSL_HAVE_CONSTANT_EVALUATED +// } +// +// Upstream documentation: +// +// http://en.cppreference.com/w/cpp/types/is_constant_evaluated +// http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#:~:text=__builtin_is_constant_evaluated +// +#if defined(ABSL_HAVE_CONSTANT_EVALUATED) + constexpr bool is_constant_evaluated() noexcept + { +#ifdef __cpp_lib_is_constant_evaluated + return std::is_constant_evaluated(); +#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated) + return __builtin_is_constant_evaluated(); +#endif + } +#endif // ABSL_HAVE_CONSTANT_EVALUATED + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_META_TYPE_TRAITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/numeric/bits.h b/CAPI/cpp/grpc/include/absl/numeric/bits.h new file mode 100644 index 00000000..577b63c7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/numeric/bits.h @@ -0,0 +1,188 @@ +// Copyright 2020 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: bits.h +// ----------------------------------------------------------------------------- +// +// This file contains implementations of C++20's bitwise math functions, as +// defined by: +// +// P0553R4: +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p0553r4.html +// P0556R3: +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0556r3.html +// P1355R2: +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2019/p1355r2.html +// P1956R1: +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p1956r1.pdf +// +// When using a standard library that implements these functions, we use the +// standard library's implementation. + +#ifndef ABSL_NUMERIC_BITS_H_ +#define ABSL_NUMERIC_BITS_H_ + +#include +#include +#include + +#include "absl/base/config.h" + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L +#include +#endif + +#include "absl/base/attributes.h" +#include "absl/numeric/internal/bits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN +#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L) + + // rotating + template + ABSL_MUST_USE_RESULT constexpr + typename std::enable_if::value, T>::type + rotl(T x, int s) noexcept + { + return numeric_internal::RotateLeft(x, s); + } + + template + ABSL_MUST_USE_RESULT constexpr + typename std::enable_if::value, T>::type + rotr(T x, int s) noexcept + { + return numeric_internal::RotateRight(x, s); + } + + // Counting functions + // + // While these functions are typically constexpr, on some platforms, they may + // not be marked as constexpr due to constraints of the compiler/available + // intrinsics. 
+ template + ABSL_INTERNAL_CONSTEXPR_CLZ inline + typename std::enable_if::value, int>::type + countl_zero(T x) noexcept + { + return numeric_internal::CountLeadingZeroes(x); + } + + template + ABSL_INTERNAL_CONSTEXPR_CLZ inline + typename std::enable_if::value, int>::type + countl_one(T x) noexcept + { + // Avoid integer promotion to a wider type + return countl_zero(static_cast(~x)); + } + + template + ABSL_INTERNAL_CONSTEXPR_CTZ inline + typename std::enable_if::value, int>::type + countr_zero(T x) noexcept + { + return numeric_internal::CountTrailingZeroes(x); + } + + template + ABSL_INTERNAL_CONSTEXPR_CTZ inline + typename std::enable_if::value, int>::type + countr_one(T x) noexcept + { + // Avoid integer promotion to a wider type + return countr_zero(static_cast(~x)); + } + + template + ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline + typename std::enable_if::value, int>::type + popcount(T x) noexcept + { + return numeric_internal::Popcount(x); + } +#else // defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L + + using std::countl_one; + using std::countl_zero; + using std::countr_one; + using std::countr_zero; + using std::popcount; + using std::rotl; + using std::rotr; + +#endif + +#if !(defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L) + // Returns: true if x is an integral power of two; false otherwise. + template + constexpr inline typename std::enable_if::value, bool>::type + has_single_bit(T x) noexcept + { + return x != 0 && (x & (x - 1)) == 0; + } + + // Returns: If x == 0, 0; otherwise one plus the base-2 logarithm of x, with any + // fractional part discarded. + template + ABSL_INTERNAL_CONSTEXPR_CLZ inline + typename std::enable_if::value, int>::type + bit_width(T x) noexcept + { + return std::numeric_limits::digits - countl_zero(x); + } + + // Returns: If x == 0, 0; otherwise the maximal value y such that + // has_single_bit(y) is true and y <= x. 
+ template + ABSL_INTERNAL_CONSTEXPR_CLZ inline + typename std::enable_if::value, T>::type + bit_floor(T x) noexcept + { + return x == 0 ? 0 : T{1} << (bit_width(x) - 1); + } + + // Returns: N, where N is the smallest power of 2 greater than or equal to x. + // + // Preconditions: N is representable as a value of type T. + template + ABSL_INTERNAL_CONSTEXPR_CLZ inline + typename std::enable_if::value, T>::type + bit_ceil(T x) + { + // If T is narrower than unsigned, T{1} << bit_width will be promoted. We + // want to force it to wraparound so that bit_ceil of an invalid value are not + // core constant expressions. + // + // BitCeilNonPowerOf2 triggers an overflow in constexpr contexts if we would + // undergo promotion to unsigned but not fit the result into T without + // truncation. + return has_single_bit(x) ? T{1} << (bit_width(x) - 1) : numeric_internal::BitCeilNonPowerOf2(x); + } +#else // defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L + + using std::bit_ceil; + using std::bit_floor; + using std::bit_width; + using std::has_single_bit; + +#endif + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_NUMERIC_BITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/numeric/int128.h b/CAPI/cpp/grpc/include/absl/numeric/int128.h new file mode 100644 index 00000000..4a9a7a23 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/numeric/int128.h @@ -0,0 +1,1405 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: int128.h +// ----------------------------------------------------------------------------- +// +// This header file defines 128-bit integer types, `uint128` and `int128`. +// +// TODO(absl-team): This module is inconsistent as many inline `uint128` methods +// are defined in this file, while many inline `int128` methods are defined in +// the `int128_*_intrinsic.inc` files. + +#ifndef ABSL_NUMERIC_INT128_H_ +#define ABSL_NUMERIC_INT128_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/base/port.h" + +#if defined(_MSC_VER) +// In very old versions of MSVC and when the /Zc:wchar_t flag is off, wchar_t is +// a typedef for unsigned short. Otherwise wchar_t is mapped to the __wchar_t +// builtin type. We need to make sure not to define operator wchar_t() +// alongside operator unsigned short() in these instances. +#define ABSL_INTERNAL_WCHAR_T __wchar_t +#if defined(_M_X64) && !defined(_M_ARM64EC) +#include +#pragma intrinsic(_umul128) +#endif // defined(_M_X64) +#else // defined(_MSC_VER) +#define ABSL_INTERNAL_WCHAR_T wchar_t +#endif // defined(_MSC_VER) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class int128; + + // uint128 + // + // An unsigned 128-bit integer type. The API is meant to mimic an intrinsic type + // as closely as is practical, including exhibiting undefined behavior in + // analogous cases (e.g. division by zero). This type is intended to be a + // drop-in replacement once C++ supports an intrinsic `uint128_t` type; when + // that occurs, existing well-behaved uses of `uint128` will continue to work + // using that new type. 
+ // + // Note: code written with this type will continue to compile once `uint128_t` + // is introduced, provided the replacement helper functions + // `Uint128(Low|High)64()` and `MakeUint128()` are made. + // + // A `uint128` supports the following: + // + // * Implicit construction from integral types + // * Explicit conversion to integral types + // + // Additionally, if your compiler supports `__int128`, `uint128` is + // interoperable with that type. (Abseil checks for this compatibility through + // the `ABSL_HAVE_INTRINSIC_INT128` macro.) + // + // However, a `uint128` differs from intrinsic integral types in the following + // ways: + // + // * Errors on implicit conversions that do not preserve value (such as + // loss of precision when converting to float values). + // * Requires explicit construction from and conversion to floating point + // types. + // * Conversion to integral types requires an explicit static_cast() to + // mimic use of the `-Wnarrowing` compiler flag. + // * The alignment requirement of `uint128` may differ from that of an + // intrinsic 128-bit integer type depending on platform and build + // configuration. + // + // Example: + // + // float y = absl::Uint128Max(); // Error. uint128 cannot be implicitly + // // converted to float. 
+ // + // absl::uint128 v; + // uint64_t i = v; // Error + // uint64_t i = static_cast(v); // OK + // + class +#if defined(ABSL_HAVE_INTRINSIC_INT128) + alignas(unsigned __int128) +#endif // ABSL_HAVE_INTRINSIC_INT128 + uint128 + { + public: + uint128() = default; + + // Constructors from arithmetic types + constexpr uint128(int v); // NOLINT(runtime/explicit) + constexpr uint128(unsigned int v); // NOLINT(runtime/explicit) + constexpr uint128(long v); // NOLINT(runtime/int) + constexpr uint128(unsigned long v); // NOLINT(runtime/int) + constexpr uint128(long long v); // NOLINT(runtime/int) + constexpr uint128(unsigned long long v); // NOLINT(runtime/int) +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr uint128(__int128 v); // NOLINT(runtime/explicit) + constexpr uint128(unsigned __int128 v); // NOLINT(runtime/explicit) +#endif // ABSL_HAVE_INTRINSIC_INT128 + constexpr uint128(int128 v); // NOLINT(runtime/explicit) + explicit uint128(float v); + explicit uint128(double v); + explicit uint128(long double v); + + // Assignment operators from arithmetic types + uint128& operator=(int v); + uint128& operator=(unsigned int v); + uint128& operator=(long v); // NOLINT(runtime/int) + uint128& operator=(unsigned long v); // NOLINT(runtime/int) + uint128& operator=(long long v); // NOLINT(runtime/int) + uint128& operator=(unsigned long long v); // NOLINT(runtime/int) +#ifdef ABSL_HAVE_INTRINSIC_INT128 + uint128& operator=(__int128 v); + uint128& operator=(unsigned __int128 v); +#endif // ABSL_HAVE_INTRINSIC_INT128 + uint128& operator=(int128 v); + + // Conversion operators to other arithmetic types + constexpr explicit operator bool() const; + constexpr explicit operator char() const; + constexpr explicit operator signed char() const; + constexpr explicit operator unsigned char() const; + constexpr explicit operator char16_t() const; + constexpr explicit operator char32_t() const; + constexpr explicit operator ABSL_INTERNAL_WCHAR_T() const; + constexpr explicit operator 
short() const; // NOLINT(runtime/int) + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator unsigned short() const; + constexpr explicit operator int() const; + constexpr explicit operator unsigned int() const; + constexpr explicit operator long() const; // NOLINT(runtime/int) + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator unsigned long() const; + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator long long() const; + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator unsigned long long() const; +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr explicit operator __int128() const; + constexpr explicit operator unsigned __int128() const; +#endif // ABSL_HAVE_INTRINSIC_INT128 + explicit operator float() const; + explicit operator double() const; + explicit operator long double() const; + + // Trivial copy constructor, assignment operator and destructor. + + // Arithmetic operators. + uint128& operator+=(uint128 other); + uint128& operator-=(uint128 other); + uint128& operator*=(uint128 other); + // Long division/modulo for uint128. + uint128& operator/=(uint128 other); + uint128& operator%=(uint128 other); + uint128 operator++(int); + uint128 operator--(int); + uint128& operator<<=(int); + uint128& operator>>=(int); + uint128& operator&=(uint128 other); + uint128& operator|=(uint128 other); + uint128& operator^=(uint128 other); + uint128& operator++(); + uint128& operator--(); + + // Uint128Low64() + // + // Returns the lower 64-bit value of a `uint128` value. + friend constexpr uint64_t Uint128Low64(uint128 v); + + // Uint128High64() + // + // Returns the higher 64-bit value of a `uint128` value. + friend constexpr uint64_t Uint128High64(uint128 v); + + // MakeUInt128() + // + // Constructs a `uint128` numeric value from two 64-bit unsigned integers. + // Note that this factory function is the only way to construct a `uint128` + // from integer values greater than 2^64. 
+ // + // Example: + // + // absl::uint128 big = absl::MakeUint128(1, 0); + friend constexpr uint128 MakeUint128(uint64_t high, uint64_t low); + + // Uint128Max() + // + // Returns the highest value for a 128-bit unsigned integer. + friend constexpr uint128 Uint128Max(); + + // Support for absl::Hash. + template + friend H AbslHashValue(H h, uint128 v) + { + return H::combine(std::move(h), Uint128High64(v), Uint128Low64(v)); + } + + // Support for absl::StrCat() etc. + template + friend void AbslStringify(Sink& sink, uint128 v) + { + sink.Append(v.ToString()); + } + + private: + constexpr uint128(uint64_t high, uint64_t low); + + std::string ToString() const; + + // TODO(strel) Update implementation to use __int128 once all users of + // uint128 are fixed to not depend on alignof(uint128) == 8. Also add + // alignas(16) to class definition to keep alignment consistent across + // platforms. +#if defined(ABSL_IS_LITTLE_ENDIAN) + uint64_t lo_; + uint64_t hi_; +#elif defined(ABSL_IS_BIG_ENDIAN) + uint64_t hi_; + uint64_t lo_; +#else // byte order +#error "Unsupported byte order: must be little-endian or big-endian." +#endif // byte order + }; + + // Prefer to use the constexpr `Uint128Max()`. + // + // TODO(absl-team) deprecate kuint128max once migration tool is released. + ABSL_DLL extern const uint128 kuint128max; + + // allow uint128 to be logged + std::ostream& operator<<(std::ostream& os, uint128 v); + + // TODO(strel) add operator>>(std::istream&, uint128) + + constexpr uint128 Uint128Max() + { + return uint128((std::numeric_limits::max)(), (std::numeric_limits::max)()); + } + + ABSL_NAMESPACE_END +} // namespace absl + +// Specialized numeric_limits for uint128. 
+namespace std +{ + template<> + class numeric_limits + { + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = false; + static constexpr bool is_integer = true; + static constexpr bool is_exact = true; + static constexpr bool has_infinity = false; + static constexpr bool has_quiet_NaN = false; + static constexpr bool has_signaling_NaN = false; + static constexpr float_denorm_style has_denorm = denorm_absent; + static constexpr bool has_denorm_loss = false; + static constexpr float_round_style round_style = round_toward_zero; + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = true; + static constexpr int digits = 128; + static constexpr int digits10 = 38; + static constexpr int max_digits10 = 0; + static constexpr int radix = 2; + static constexpr int min_exponent = 0; + static constexpr int min_exponent10 = 0; + static constexpr int max_exponent = 0; + static constexpr int max_exponent10 = 0; +#ifdef ABSL_HAVE_INTRINSIC_INT128 + static constexpr bool traps = numeric_limits::traps; +#else // ABSL_HAVE_INTRINSIC_INT128 + static constexpr bool traps = numeric_limits::traps; +#endif // ABSL_HAVE_INTRINSIC_INT128 + static constexpr bool tinyness_before = false; + + static constexpr absl::uint128(min)() + { + return 0; + } + static constexpr absl::uint128 lowest() + { + return 0; + } + static constexpr absl::uint128(max)() + { + return absl::Uint128Max(); + } + static constexpr absl::uint128 epsilon() + { + return 0; + } + static constexpr absl::uint128 round_error() + { + return 0; + } + static constexpr absl::uint128 infinity() + { + return 0; + } + static constexpr absl::uint128 quiet_NaN() + { + return 0; + } + static constexpr absl::uint128 signaling_NaN() + { + return 0; + } + static constexpr absl::uint128 denorm_min() + { + return 0; + } + }; +} // namespace std + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // int128 + // + // A signed 128-bit integer 
type. The API is meant to mimic an intrinsic + // integral type as closely as is practical, including exhibiting undefined + // behavior in analogous cases (e.g. division by zero). + // + // An `int128` supports the following: + // + // * Implicit construction from integral types + // * Explicit conversion to integral types + // + // However, an `int128` differs from intrinsic integral types in the following + // ways: + // + // * It is not implicitly convertible to other integral types. + // * Requires explicit construction from and conversion to floating point + // types. + + // Additionally, if your compiler supports `__int128`, `int128` is + // interoperable with that type. (Abseil checks for this compatibility through + // the `ABSL_HAVE_INTRINSIC_INT128` macro.) + // + // The design goal for `int128` is that it will be compatible with a future + // `int128_t`, if that type becomes a part of the standard. + // + // Example: + // + // float y = absl::int128(17); // Error. int128 cannot be implicitly + // // converted to float. 
+ // + // absl::int128 v; + // int64_t i = v; // Error + // int64_t i = static_cast(v); // OK + // + class int128 + { + public: + int128() = default; + + // Constructors from arithmetic types + constexpr int128(int v); // NOLINT(runtime/explicit) + constexpr int128(unsigned int v); // NOLINT(runtime/explicit) + constexpr int128(long v); // NOLINT(runtime/int) + constexpr int128(unsigned long v); // NOLINT(runtime/int) + constexpr int128(long long v); // NOLINT(runtime/int) + constexpr int128(unsigned long long v); // NOLINT(runtime/int) +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr int128(__int128 v); // NOLINT(runtime/explicit) + constexpr explicit int128(unsigned __int128 v); +#endif // ABSL_HAVE_INTRINSIC_INT128 + constexpr explicit int128(uint128 v); + explicit int128(float v); + explicit int128(double v); + explicit int128(long double v); + + // Assignment operators from arithmetic types + int128& operator=(int v); + int128& operator=(unsigned int v); + int128& operator=(long v); // NOLINT(runtime/int) + int128& operator=(unsigned long v); // NOLINT(runtime/int) + int128& operator=(long long v); // NOLINT(runtime/int) + int128& operator=(unsigned long long v); // NOLINT(runtime/int) +#ifdef ABSL_HAVE_INTRINSIC_INT128 + int128& operator=(__int128 v); +#endif // ABSL_HAVE_INTRINSIC_INT128 + + // Conversion operators to other arithmetic types + constexpr explicit operator bool() const; + constexpr explicit operator char() const; + constexpr explicit operator signed char() const; + constexpr explicit operator unsigned char() const; + constexpr explicit operator char16_t() const; + constexpr explicit operator char32_t() const; + constexpr explicit operator ABSL_INTERNAL_WCHAR_T() const; + constexpr explicit operator short() const; // NOLINT(runtime/int) + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator unsigned short() const; + constexpr explicit operator int() const; + constexpr explicit operator unsigned int() const; + constexpr explicit operator 
long() const; // NOLINT(runtime/int) + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator unsigned long() const; + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator long long() const; + // NOLINTNEXTLINE(runtime/int) + constexpr explicit operator unsigned long long() const; +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr explicit operator __int128() const; + constexpr explicit operator unsigned __int128() const; +#endif // ABSL_HAVE_INTRINSIC_INT128 + explicit operator float() const; + explicit operator double() const; + explicit operator long double() const; + + // Trivial copy constructor, assignment operator and destructor. + + // Arithmetic operators + int128& operator+=(int128 other); + int128& operator-=(int128 other); + int128& operator*=(int128 other); + int128& operator/=(int128 other); + int128& operator%=(int128 other); + int128 operator++(int); // postfix increment: i++ + int128 operator--(int); // postfix decrement: i-- + int128& operator++(); // prefix increment: ++i + int128& operator--(); // prefix decrement: --i + int128& operator&=(int128 other); + int128& operator|=(int128 other); + int128& operator^=(int128 other); + int128& operator<<=(int amount); + int128& operator>>=(int amount); + + // Int128Low64() + // + // Returns the lower 64-bit value of a `int128` value. + friend constexpr uint64_t Int128Low64(int128 v); + + // Int128High64() + // + // Returns the higher 64-bit value of a `int128` value. + friend constexpr int64_t Int128High64(int128 v); + + // MakeInt128() + // + // Constructs a `int128` numeric value from two 64-bit integers. Note that + // signedness is conveyed in the upper `high` value. + // + // (absl::int128(1) << 64) * high + low + // + // Note that this factory function is the only way to construct a `int128` + // from integer values greater than 2^64 or less than -2^64. 
+ // + // Example: + // + // absl::int128 big = absl::MakeInt128(1, 0); + // absl::int128 big_n = absl::MakeInt128(-1, 0); + friend constexpr int128 MakeInt128(int64_t high, uint64_t low); + + // Int128Max() + // + // Returns the maximum value for a 128-bit signed integer. + friend constexpr int128 Int128Max(); + + // Int128Min() + // + // Returns the minimum value for a 128-bit signed integer. + friend constexpr int128 Int128Min(); + + // Support for absl::Hash. + template + friend H AbslHashValue(H h, int128 v) + { + return H::combine(std::move(h), Int128High64(v), Int128Low64(v)); + } + + // Support for absl::StrCat() etc. + template + friend void AbslStringify(Sink& sink, int128 v) + { + sink.Append(v.ToString()); + } + + private: + constexpr int128(int64_t high, uint64_t low); + + std::string ToString() const; + +#if defined(ABSL_HAVE_INTRINSIC_INT128) + __int128 v_; +#else // ABSL_HAVE_INTRINSIC_INT128 +#if defined(ABSL_IS_LITTLE_ENDIAN) + uint64_t lo_; + int64_t hi_; +#elif defined(ABSL_IS_BIG_ENDIAN) + int64_t hi_; + uint64_t lo_; +#else // byte order +#error "Unsupported byte order: must be little-endian or big-endian." +#endif // byte order +#endif // ABSL_HAVE_INTRINSIC_INT128 + }; + + std::ostream& operator<<(std::ostream& os, int128 v); + + // TODO(absl-team) add operator>>(std::istream&, int128) + + constexpr int128 Int128Max() + { + return int128((std::numeric_limits::max)(), (std::numeric_limits::max)()); + } + + constexpr int128 Int128Min() + { + return int128((std::numeric_limits::min)(), 0); + } + + ABSL_NAMESPACE_END +} // namespace absl + +// Specialized numeric_limits for int128. 
+namespace std +{ + template<> + class numeric_limits + { + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool is_integer = true; + static constexpr bool is_exact = true; + static constexpr bool has_infinity = false; + static constexpr bool has_quiet_NaN = false; + static constexpr bool has_signaling_NaN = false; + static constexpr float_denorm_style has_denorm = denorm_absent; + static constexpr bool has_denorm_loss = false; + static constexpr float_round_style round_style = round_toward_zero; + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 127; + static constexpr int digits10 = 38; + static constexpr int max_digits10 = 0; + static constexpr int radix = 2; + static constexpr int min_exponent = 0; + static constexpr int min_exponent10 = 0; + static constexpr int max_exponent = 0; + static constexpr int max_exponent10 = 0; +#ifdef ABSL_HAVE_INTRINSIC_INT128 + static constexpr bool traps = numeric_limits<__int128>::traps; +#else // ABSL_HAVE_INTRINSIC_INT128 + static constexpr bool traps = numeric_limits::traps; +#endif // ABSL_HAVE_INTRINSIC_INT128 + static constexpr bool tinyness_before = false; + + static constexpr absl::int128(min)() + { + return absl::Int128Min(); + } + static constexpr absl::int128 lowest() + { + return absl::Int128Min(); + } + static constexpr absl::int128(max)() + { + return absl::Int128Max(); + } + static constexpr absl::int128 epsilon() + { + return 0; + } + static constexpr absl::int128 round_error() + { + return 0; + } + static constexpr absl::int128 infinity() + { + return 0; + } + static constexpr absl::int128 quiet_NaN() + { + return 0; + } + static constexpr absl::int128 signaling_NaN() + { + return 0; + } + static constexpr absl::int128 denorm_min() + { + return 0; + } + }; +} // namespace std + +// 
-------------------------------------------------------------------------- +// Implementation details follow +// -------------------------------------------------------------------------- +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + constexpr uint128 MakeUint128(uint64_t high, uint64_t low) + { + return uint128(high, low); + } + + // Assignment from integer types. + + inline uint128& uint128::operator=(int v) + { + return *this = uint128(v); + } + + inline uint128& uint128::operator=(unsigned int v) + { + return *this = uint128(v); + } + + inline uint128& uint128::operator=(long v) + { // NOLINT(runtime/int) + return *this = uint128(v); + } + + // NOLINTNEXTLINE(runtime/int) + inline uint128& uint128::operator=(unsigned long v) + { + return *this = uint128(v); + } + + // NOLINTNEXTLINE(runtime/int) + inline uint128& uint128::operator=(long long v) + { + return *this = uint128(v); + } + + // NOLINTNEXTLINE(runtime/int) + inline uint128& uint128::operator=(unsigned long long v) + { + return *this = uint128(v); + } + +#ifdef ABSL_HAVE_INTRINSIC_INT128 + inline uint128& uint128::operator=(__int128 v) + { + return *this = uint128(v); + } + + inline uint128& uint128::operator=(unsigned __int128 v) + { + return *this = uint128(v); + } +#endif // ABSL_HAVE_INTRINSIC_INT128 + + inline uint128& uint128::operator=(int128 v) + { + return *this = uint128(v); + } + + // Arithmetic operators. 
+ + constexpr uint128 operator<<(uint128 lhs, int amount); + constexpr uint128 operator>>(uint128 lhs, int amount); + constexpr uint128 operator+(uint128 lhs, uint128 rhs); + constexpr uint128 operator-(uint128 lhs, uint128 rhs); + uint128 operator*(uint128 lhs, uint128 rhs); + uint128 operator/(uint128 lhs, uint128 rhs); + uint128 operator%(uint128 lhs, uint128 rhs); + + inline uint128& uint128::operator<<=(int amount) + { + *this = *this << amount; + return *this; + } + + inline uint128& uint128::operator>>=(int amount) + { + *this = *this >> amount; + return *this; + } + + inline uint128& uint128::operator+=(uint128 other) + { + *this = *this + other; + return *this; + } + + inline uint128& uint128::operator-=(uint128 other) + { + *this = *this - other; + return *this; + } + + inline uint128& uint128::operator*=(uint128 other) + { + *this = *this * other; + return *this; + } + + inline uint128& uint128::operator/=(uint128 other) + { + *this = *this / other; + return *this; + } + + inline uint128& uint128::operator%=(uint128 other) + { + *this = *this % other; + return *this; + } + + constexpr uint64_t Uint128Low64(uint128 v) + { + return v.lo_; + } + + constexpr uint64_t Uint128High64(uint128 v) + { + return v.hi_; + } + + // Constructors from integer types. + +#if defined(ABSL_IS_LITTLE_ENDIAN) + + constexpr uint128::uint128(uint64_t high, uint64_t low) : + lo_{low}, + hi_{high} + { + } + + constexpr uint128::uint128(int v) : + lo_{static_cast(v)}, + hi_{v < 0 ? (std::numeric_limits::max)() : 0} + { + } + constexpr uint128::uint128(long v) // NOLINT(runtime/int) + : + lo_{static_cast(v)}, + hi_{v < 0 ? (std::numeric_limits::max)() : 0} + { + } + constexpr uint128::uint128(long long v) // NOLINT(runtime/int) + : + lo_{static_cast(v)}, + hi_{v < 0 ? 
(std::numeric_limits::max)() : 0} + { + } + + constexpr uint128::uint128(unsigned int v) : + lo_{v}, + hi_{0} + { + } + // NOLINTNEXTLINE(runtime/int) + constexpr uint128::uint128(unsigned long v) : + lo_{v}, + hi_{0} + { + } + // NOLINTNEXTLINE(runtime/int) + constexpr uint128::uint128(unsigned long long v) : + lo_{v}, + hi_{0} + { + } + +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr uint128::uint128(__int128 v) : + lo_{static_cast(v & ~uint64_t{0})}, + hi_{static_cast(static_cast(v) >> 64)} + { + } + constexpr uint128::uint128(unsigned __int128 v) : + lo_{static_cast(v & ~uint64_t{0})}, + hi_{static_cast(v >> 64)} + { + } +#endif // ABSL_HAVE_INTRINSIC_INT128 + + constexpr uint128::uint128(int128 v) : + lo_{Int128Low64(v)}, + hi_{static_cast(Int128High64(v))} + { + } + +#elif defined(ABSL_IS_BIG_ENDIAN) + + constexpr uint128::uint128(uint64_t high, uint64_t low) : + hi_{high}, + lo_{low} + { + } + + constexpr uint128::uint128(int v) : + hi_{v < 0 ? (std::numeric_limits::max)() : 0}, + lo_{static_cast(v)} + { + } + constexpr uint128::uint128(long v) // NOLINT(runtime/int) + : + hi_{v < 0 ? (std::numeric_limits::max)() : 0}, + lo_{static_cast(v)} + { + } + constexpr uint128::uint128(long long v) // NOLINT(runtime/int) + : + hi_{v < 0 ? 
(std::numeric_limits::max)() : 0}, + lo_{static_cast(v)} + { + } + + constexpr uint128::uint128(unsigned int v) : + hi_{0}, + lo_{v} + { + } + // NOLINTNEXTLINE(runtime/int) + constexpr uint128::uint128(unsigned long v) : + hi_{0}, + lo_{v} + { + } + // NOLINTNEXTLINE(runtime/int) + constexpr uint128::uint128(unsigned long long v) : + hi_{0}, + lo_{v} + { + } + +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr uint128::uint128(__int128 v) : + hi_{static_cast(static_cast(v) >> 64)}, + lo_{static_cast(v & ~uint64_t{0})} + { + } + constexpr uint128::uint128(unsigned __int128 v) : + hi_{static_cast(v >> 64)}, + lo_{static_cast(v & ~uint64_t{0})} + { + } +#endif // ABSL_HAVE_INTRINSIC_INT128 + + constexpr uint128::uint128(int128 v) : + hi_{static_cast(Int128High64(v))}, + lo_{Int128Low64(v)} + { + } + +#else // byte order +#error "Unsupported byte order: must be little-endian or big-endian." +#endif // byte order + + // Conversion operators to integer types. + + constexpr uint128::operator bool() const + { + return lo_ || hi_; + } + + constexpr uint128::operator char() const + { + return static_cast(lo_); + } + + constexpr uint128::operator signed char() const + { + return static_cast(lo_); + } + + constexpr uint128::operator unsigned char() const + { + return static_cast(lo_); + } + + constexpr uint128::operator char16_t() const + { + return static_cast(lo_); + } + + constexpr uint128::operator char32_t() const + { + return static_cast(lo_); + } + + constexpr uint128::operator ABSL_INTERNAL_WCHAR_T() const + { + return static_cast(lo_); + } + + // NOLINTNEXTLINE(runtime/int) + constexpr uint128::operator short() const + { + return static_cast(lo_); + } + + constexpr uint128::operator unsigned short() const + { // NOLINT(runtime/int) + return static_cast(lo_); // NOLINT(runtime/int) + } + + constexpr uint128::operator int() const + { + return static_cast(lo_); + } + + constexpr uint128::operator unsigned int() const + { + return static_cast(lo_); + } + + // 
NOLINTNEXTLINE(runtime/int) + constexpr uint128::operator long() const + { + return static_cast(lo_); + } + + constexpr uint128::operator unsigned long() const + { // NOLINT(runtime/int) + return static_cast(lo_); // NOLINT(runtime/int) + } + + constexpr uint128::operator long long() const + { // NOLINT(runtime/int) + return static_cast(lo_); // NOLINT(runtime/int) + } + + constexpr uint128::operator unsigned long long() const + { // NOLINT(runtime/int) + return static_cast(lo_); // NOLINT(runtime/int) + } + +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr uint128::operator __int128() const + { + return (static_cast<__int128>(hi_) << 64) + lo_; + } + + constexpr uint128::operator unsigned __int128() const + { + return (static_cast(hi_) << 64) + lo_; + } +#endif // ABSL_HAVE_INTRINSIC_INT128 + + // Conversion operators to floating point types. + + inline uint128::operator float() const + { + return static_cast(lo_) + std::ldexp(static_cast(hi_), 64); + } + + inline uint128::operator double() const + { + return static_cast(lo_) + std::ldexp(static_cast(hi_), 64); + } + + inline uint128::operator long double() const + { + return static_cast(lo_) + + std::ldexp(static_cast(hi_), 64); + } + + // Comparison operators. + + constexpr bool operator==(uint128 lhs, uint128 rhs) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return static_cast(lhs) == + static_cast(rhs); +#else + return (Uint128Low64(lhs) == Uint128Low64(rhs) && Uint128High64(lhs) == Uint128High64(rhs)); +#endif + } + + constexpr bool operator!=(uint128 lhs, uint128 rhs) + { + return !(lhs == rhs); + } + + constexpr bool operator<(uint128 lhs, uint128 rhs) + { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return static_cast(lhs) < + static_cast(rhs); +#else + return (Uint128High64(lhs) == Uint128High64(rhs)) ? 
(Uint128Low64(lhs) < Uint128Low64(rhs)) : (Uint128High64(lhs) < Uint128High64(rhs)); +#endif + } + + constexpr bool operator>(uint128 lhs, uint128 rhs) + { + return rhs < lhs; + } + + constexpr bool operator<=(uint128 lhs, uint128 rhs) + { + return !(rhs < lhs); + } + + constexpr bool operator>=(uint128 lhs, uint128 rhs) + { + return !(lhs < rhs); + } + + // Unary operators. + + constexpr inline uint128 operator+(uint128 val) + { + return val; + } + + constexpr inline int128 operator+(int128 val) + { + return val; + } + + constexpr uint128 operator-(uint128 val) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return -static_cast(val); +#else + return MakeUint128( + ~Uint128High64(val) + static_cast(Uint128Low64(val) == 0), + ~Uint128Low64(val) + 1 + ); +#endif + } + + constexpr inline bool operator!(uint128 val) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return !static_cast(val); +#else + return !Uint128High64(val) && !Uint128Low64(val); +#endif + } + + // Logical operators. + + constexpr inline uint128 operator~(uint128 val) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return ~static_cast(val); +#else + return MakeUint128(~Uint128High64(val), ~Uint128Low64(val)); +#endif + } + + constexpr inline uint128 operator|(uint128 lhs, uint128 rhs) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return static_cast(lhs) | + static_cast(rhs); +#else + return MakeUint128(Uint128High64(lhs) | Uint128High64(rhs), Uint128Low64(lhs) | Uint128Low64(rhs)); +#endif + } + + constexpr inline uint128 operator&(uint128 lhs, uint128 rhs) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return static_cast(lhs) & + static_cast(rhs); +#else + return MakeUint128(Uint128High64(lhs) & Uint128High64(rhs), Uint128Low64(lhs) & Uint128Low64(rhs)); +#endif + } + + constexpr inline uint128 operator^(uint128 lhs, uint128 rhs) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return static_cast(lhs) ^ + static_cast(rhs); +#else + return MakeUint128(Uint128High64(lhs) ^ Uint128High64(rhs), 
Uint128Low64(lhs) ^ Uint128Low64(rhs)); +#endif + } + + inline uint128& uint128::operator|=(uint128 other) + { + *this = *this | other; + return *this; + } + + inline uint128& uint128::operator&=(uint128 other) + { + *this = *this & other; + return *this; + } + + inline uint128& uint128::operator^=(uint128 other) + { + *this = *this ^ other; + return *this; + } + + // Arithmetic operators. + + constexpr uint128 operator<<(uint128 lhs, int amount) + { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return static_cast(lhs) << amount; +#else + // uint64_t shifts of >= 64 are undefined, so we will need some + // special-casing. + return amount >= 64 ? MakeUint128(Uint128Low64(lhs) << (amount - 64), 0) : amount == 0 ? lhs : + MakeUint128((Uint128High64(lhs) << amount) | (Uint128Low64(lhs) >> (64 - amount)), Uint128Low64(lhs) << amount); +#endif + } + + constexpr uint128 operator>>(uint128 lhs, int amount) + { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return static_cast(lhs) >> amount; +#else + // uint64_t shifts of >= 64 are undefined, so we will need some + // special-casing. + return amount >= 64 ? MakeUint128(0, Uint128High64(lhs) >> (amount - 64)) : amount == 0 ? lhs : + MakeUint128(Uint128High64(lhs) >> amount, (Uint128Low64(lhs) >> amount) | (Uint128High64(lhs) << (64 - amount))); +#endif + } + +#if !defined(ABSL_HAVE_INTRINSIC_INT128) + namespace int128_internal + { + constexpr uint128 AddResult(uint128 result, uint128 lhs) + { + // check for carry + return (Uint128Low64(result) < Uint128Low64(lhs)) ? 
MakeUint128(Uint128High64(result) + 1, Uint128Low64(result)) : result; + } + } // namespace int128_internal +#endif + + constexpr uint128 operator+(uint128 lhs, uint128 rhs) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return static_cast(lhs) + + static_cast(rhs); +#else + return int128_internal::AddResult( + MakeUint128(Uint128High64(lhs) + Uint128High64(rhs), Uint128Low64(lhs) + Uint128Low64(rhs)), + lhs + ); +#endif + } + +#if !defined(ABSL_HAVE_INTRINSIC_INT128) + namespace int128_internal + { + constexpr uint128 SubstructResult(uint128 result, uint128 lhs, uint128 rhs) + { + // check for carry + return (Uint128Low64(lhs) < Uint128Low64(rhs)) ? MakeUint128(Uint128High64(result) - 1, Uint128Low64(result)) : result; + } + } // namespace int128_internal +#endif + + constexpr uint128 operator-(uint128 lhs, uint128 rhs) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return static_cast(lhs) - + static_cast(rhs); +#else + return int128_internal::SubstructResult( + MakeUint128(Uint128High64(lhs) - Uint128High64(rhs), Uint128Low64(lhs) - Uint128Low64(rhs)), + lhs, + rhs + ); +#endif + } + + inline uint128 operator*(uint128 lhs, uint128 rhs) + { +#if defined(ABSL_HAVE_INTRINSIC_INT128) + // TODO(strel) Remove once alignment issues are resolved and unsigned __int128 + // can be used for uint128 storage. 
+ return static_cast(lhs) * + static_cast(rhs); +#elif defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC) + uint64_t carry; + uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry); + return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) + Uint128High64(lhs) * Uint128Low64(rhs) + carry, low); +#else // ABSL_HAVE_INTRINSIC128 + uint64_t a32 = Uint128Low64(lhs) >> 32; + uint64_t a00 = Uint128Low64(lhs) & 0xffffffff; + uint64_t b32 = Uint128Low64(rhs) >> 32; + uint64_t b00 = Uint128Low64(rhs) & 0xffffffff; + uint128 result = + MakeUint128(Uint128High64(lhs) * Uint128Low64(rhs) + Uint128Low64(lhs) * Uint128High64(rhs) + a32 * b32, a00 * b00); + result += uint128(a32 * b00) << 32; + result += uint128(a00 * b32) << 32; + return result; +#endif // ABSL_HAVE_INTRINSIC128 + } + +#if defined(ABSL_HAVE_INTRINSIC_INT128) + inline uint128 operator/(uint128 lhs, uint128 rhs) + { + return static_cast(lhs) / + static_cast(rhs); + } + + inline uint128 operator%(uint128 lhs, uint128 rhs) + { + return static_cast(lhs) % + static_cast(rhs); + } +#endif + + // Increment/decrement operators. + + inline uint128 uint128::operator++(int) + { + uint128 tmp(*this); + *this += 1; + return tmp; + } + + inline uint128 uint128::operator--(int) + { + uint128 tmp(*this); + *this -= 1; + return tmp; + } + + inline uint128& uint128::operator++() + { + *this += 1; + return *this; + } + + inline uint128& uint128::operator--() + { + *this -= 1; + return *this; + } + + constexpr int128 MakeInt128(int64_t high, uint64_t low) + { + return int128(high, low); + } + + // Assignment from integer types. 
+ inline int128& int128::operator=(int v) + { + return *this = int128(v); + } + + inline int128& int128::operator=(unsigned int v) + { + return *this = int128(v); + } + + inline int128& int128::operator=(long v) + { // NOLINT(runtime/int) + return *this = int128(v); + } + + // NOLINTNEXTLINE(runtime/int) + inline int128& int128::operator=(unsigned long v) + { + return *this = int128(v); + } + + // NOLINTNEXTLINE(runtime/int) + inline int128& int128::operator=(long long v) + { + return *this = int128(v); + } + + // NOLINTNEXTLINE(runtime/int) + inline int128& int128::operator=(unsigned long long v) + { + return *this = int128(v); + } + + // Arithmetic operators. + constexpr int128 operator-(int128 v); + constexpr int128 operator+(int128 lhs, int128 rhs); + constexpr int128 operator-(int128 lhs, int128 rhs); + int128 operator*(int128 lhs, int128 rhs); + int128 operator/(int128 lhs, int128 rhs); + int128 operator%(int128 lhs, int128 rhs); + constexpr int128 operator|(int128 lhs, int128 rhs); + constexpr int128 operator&(int128 lhs, int128 rhs); + constexpr int128 operator^(int128 lhs, int128 rhs); + constexpr int128 operator<<(int128 lhs, int amount); + constexpr int128 operator>>(int128 lhs, int amount); + + inline int128& int128::operator+=(int128 other) + { + *this = *this + other; + return *this; + } + + inline int128& int128::operator-=(int128 other) + { + *this = *this - other; + return *this; + } + + inline int128& int128::operator*=(int128 other) + { + *this = *this * other; + return *this; + } + + inline int128& int128::operator/=(int128 other) + { + *this = *this / other; + return *this; + } + + inline int128& int128::operator%=(int128 other) + { + *this = *this % other; + return *this; + } + + inline int128& int128::operator|=(int128 other) + { + *this = *this | other; + return *this; + } + + inline int128& int128::operator&=(int128 other) + { + *this = *this & other; + return *this; + } + + inline int128& int128::operator^=(int128 other) + { + *this = 
*this ^ other; + return *this; + } + + inline int128& int128::operator<<=(int amount) + { + *this = *this << amount; + return *this; + } + + inline int128& int128::operator>>=(int amount) + { + *this = *this >> amount; + return *this; + } + + // Forward declaration for comparison operators. + constexpr bool operator!=(int128 lhs, int128 rhs); + + namespace int128_internal + { + + // Casts from unsigned to signed while preserving the underlying binary + // representation. + constexpr int64_t BitCastToSigned(uint64_t v) + { + // Casting an unsigned integer to a signed integer of the same + // width is implementation defined behavior if the source value would not fit + // in the destination type. We step around it with a roundtrip bitwise not + // operation to make sure this function remains constexpr. Clang, GCC, and + // MSVC optimize this to a no-op on x86-64. + return v & (uint64_t{1} << 63) ? ~static_cast(~v) : static_cast(v); + } + + } // namespace int128_internal + +#if defined(ABSL_HAVE_INTRINSIC_INT128) +#include "absl/numeric/int128_have_intrinsic.inc" // IWYU pragma: export +#else // ABSL_HAVE_INTRINSIC_INT128 +#include "absl/numeric/int128_no_intrinsic.inc" // IWYU pragma: export +#endif // ABSL_HAVE_INTRINSIC_INT128 + + ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_WCHAR_T + +#endif // ABSL_NUMERIC_INT128_H_ diff --git a/CAPI/cpp/grpc/include/absl/numeric/int128_have_intrinsic.inc b/CAPI/cpp/grpc/include/absl/numeric/int128_have_intrinsic.inc new file mode 100644 index 00000000..6f1ac644 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/numeric/int128_have_intrinsic.inc @@ -0,0 +1,293 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains :int128 implementation details that depend on internal +// representation when ABSL_HAVE_INTRINSIC_INT128 is defined. This file is +// included by int128.h and relies on ABSL_INTERNAL_WCHAR_T being defined. + +namespace int128_internal { + +// Casts from unsigned to signed while preserving the underlying binary +// representation. +constexpr __int128 BitCastToSigned(unsigned __int128 v) { + // Casting an unsigned integer to a signed integer of the same + // width is implementation defined behavior if the source value would not fit + // in the destination type. We step around it with a roundtrip bitwise not + // operation to make sure this function remains constexpr. Clang and GCC + // optimize this to a no-op on x86-64. + return v & (static_cast(1) << 127) + ? ~static_cast<__int128>(~v) + : static_cast<__int128>(v); +} + +} // namespace int128_internal + +inline int128& int128::operator=(__int128 v) { + v_ = v; + return *this; +} + +constexpr uint64_t Int128Low64(int128 v) { + return static_cast(v.v_ & ~uint64_t{0}); +} + +constexpr int64_t Int128High64(int128 v) { + // Initially cast to unsigned to prevent a right shift on a negative value. + return int128_internal::BitCastToSigned( + static_cast(static_cast(v.v_) >> 64)); +} + +constexpr int128::int128(int64_t high, uint64_t low) + // Initially cast to unsigned to prevent a left shift that overflows. 
+ : v_(int128_internal::BitCastToSigned(static_cast(high) + << 64) | + low) {} + + +constexpr int128::int128(int v) : v_{v} {} + +constexpr int128::int128(long v) : v_{v} {} // NOLINT(runtime/int) + +constexpr int128::int128(long long v) : v_{v} {} // NOLINT(runtime/int) + +constexpr int128::int128(__int128 v) : v_{v} {} + +constexpr int128::int128(unsigned int v) : v_{v} {} + +constexpr int128::int128(unsigned long v) : v_{v} {} // NOLINT(runtime/int) + +// NOLINTNEXTLINE(runtime/int) +constexpr int128::int128(unsigned long long v) : v_{v} {} + +constexpr int128::int128(unsigned __int128 v) : v_{static_cast<__int128>(v)} {} + +inline int128::int128(float v) { + v_ = static_cast<__int128>(v); +} + +inline int128::int128(double v) { + v_ = static_cast<__int128>(v); +} + +inline int128::int128(long double v) { + v_ = static_cast<__int128>(v); +} + +constexpr int128::int128(uint128 v) : v_{static_cast<__int128>(v)} {} + +constexpr int128::operator bool() const { return static_cast(v_); } + +constexpr int128::operator char() const { return static_cast(v_); } + +constexpr int128::operator signed char() const { + return static_cast(v_); +} + +constexpr int128::operator unsigned char() const { + return static_cast(v_); +} + +constexpr int128::operator char16_t() const { + return static_cast(v_); +} + +constexpr int128::operator char32_t() const { + return static_cast(v_); +} + +constexpr int128::operator ABSL_INTERNAL_WCHAR_T() const { + return static_cast(v_); +} + +constexpr int128::operator short() const { // NOLINT(runtime/int) + return static_cast(v_); // NOLINT(runtime/int) +} + +constexpr int128::operator unsigned short() const { // NOLINT(runtime/int) + return static_cast(v_); // NOLINT(runtime/int) +} + +constexpr int128::operator int() const { + return static_cast(v_); +} + +constexpr int128::operator unsigned int() const { + return static_cast(v_); +} + +constexpr int128::operator long() const { // NOLINT(runtime/int) + return static_cast(v_); // 
NOLINT(runtime/int) +} + +constexpr int128::operator unsigned long() const { // NOLINT(runtime/int) + return static_cast(v_); // NOLINT(runtime/int) +} + +constexpr int128::operator long long() const { // NOLINT(runtime/int) + return static_cast(v_); // NOLINT(runtime/int) +} + +constexpr int128::operator unsigned long long() const { // NOLINT(runtime/int) + return static_cast(v_); // NOLINT(runtime/int) +} + +constexpr int128::operator __int128() const { return v_; } + +constexpr int128::operator unsigned __int128() const { + return static_cast(v_); +} + +// Clang on PowerPC sometimes produces incorrect __int128 to floating point +// conversions. In that case, we do the conversion with a similar implementation +// to the conversion operators in int128_no_intrinsic.inc. +#if defined(__clang__) && !defined(__ppc64__) +inline int128::operator float() const { return static_cast(v_); } + +inline int128::operator double() const { return static_cast(v_); } + +inline int128::operator long double() const { + return static_cast(v_); +} + +#else // Clang on PowerPC + +inline int128::operator float() const { + // We must convert the absolute value and then negate as needed, because + // floating point types are typically sign-magnitude. Otherwise, the + // difference between the high and low 64 bits when interpreted as two's + // complement overwhelms the precision of the mantissa. + // + // Also check to make sure we don't negate Int128Min() + return v_ < 0 && *this != Int128Min() + ? -static_cast(-*this) + : static_cast(Int128Low64(*this)) + + std::ldexp(static_cast(Int128High64(*this)), 64); +} + +inline int128::operator double() const { + // See comment in int128::operator float() above. + return v_ < 0 && *this != Int128Min() + ? -static_cast(-*this) + : static_cast(Int128Low64(*this)) + + std::ldexp(static_cast(Int128High64(*this)), 64); +} + +inline int128::operator long double() const { + // See comment in int128::operator float() above. 
+ return v_ < 0 && *this != Int128Min() + ? -static_cast(-*this) + : static_cast(Int128Low64(*this)) + + std::ldexp(static_cast(Int128High64(*this)), + 64); +} +#endif // Clang on PowerPC + +// Comparison operators. + +constexpr bool operator==(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) == static_cast<__int128>(rhs); +} + +constexpr bool operator!=(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) != static_cast<__int128>(rhs); +} + +constexpr bool operator<(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) < static_cast<__int128>(rhs); +} + +constexpr bool operator>(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) > static_cast<__int128>(rhs); +} + +constexpr bool operator<=(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) <= static_cast<__int128>(rhs); +} + +constexpr bool operator>=(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) >= static_cast<__int128>(rhs); +} + +// Unary operators. + +constexpr int128 operator-(int128 v) { return -static_cast<__int128>(v); } + +constexpr bool operator!(int128 v) { return !static_cast<__int128>(v); } + +constexpr int128 operator~(int128 val) { return ~static_cast<__int128>(val); } + +// Arithmetic operators. 
+ +constexpr int128 operator+(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) + static_cast<__int128>(rhs); +} + +constexpr int128 operator-(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) - static_cast<__int128>(rhs); +} + +inline int128 operator*(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) * static_cast<__int128>(rhs); +} + +inline int128 operator/(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) / static_cast<__int128>(rhs); +} + +inline int128 operator%(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) % static_cast<__int128>(rhs); +} + +inline int128 int128::operator++(int) { + int128 tmp(*this); + ++v_; + return tmp; +} + +inline int128 int128::operator--(int) { + int128 tmp(*this); + --v_; + return tmp; +} + +inline int128& int128::operator++() { + ++v_; + return *this; +} + +inline int128& int128::operator--() { + --v_; + return *this; +} + +constexpr int128 operator|(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) | static_cast<__int128>(rhs); +} + +constexpr int128 operator&(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) & static_cast<__int128>(rhs); +} + +constexpr int128 operator^(int128 lhs, int128 rhs) { + return static_cast<__int128>(lhs) ^ static_cast<__int128>(rhs); +} + +constexpr int128 operator<<(int128 lhs, int amount) { + return static_cast<__int128>(lhs) << amount; +} + +constexpr int128 operator>>(int128 lhs, int amount) { + return static_cast<__int128>(lhs) >> amount; +} diff --git a/CAPI/cpp/grpc/include/absl/numeric/int128_no_intrinsic.inc b/CAPI/cpp/grpc/include/absl/numeric/int128_no_intrinsic.inc new file mode 100644 index 00000000..6f5d8377 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/numeric/int128_no_intrinsic.inc @@ -0,0 +1,328 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains :int128 implementation details that depend on internal +// representation when ABSL_HAVE_INTRINSIC_INT128 is *not* defined. This file +// is included by int128.h and relies on ABSL_INTERNAL_WCHAR_T being defined. + +constexpr uint64_t Int128Low64(int128 v) { return v.lo_; } + +constexpr int64_t Int128High64(int128 v) { return v.hi_; } + +#if defined(ABSL_IS_LITTLE_ENDIAN) + +constexpr int128::int128(int64_t high, uint64_t low) : lo_(low), hi_(high) {} + +constexpr int128::int128(int v) + : lo_{static_cast(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {} +constexpr int128::int128(long v) // NOLINT(runtime/int) + : lo_{static_cast(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {} +constexpr int128::int128(long long v) // NOLINT(runtime/int) + : lo_{static_cast(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {} + +constexpr int128::int128(unsigned int v) : lo_{v}, hi_{0} {} +// NOLINTNEXTLINE(runtime/int) +constexpr int128::int128(unsigned long v) : lo_{v}, hi_{0} {} +// NOLINTNEXTLINE(runtime/int) +constexpr int128::int128(unsigned long long v) : lo_{v}, hi_{0} {} + +constexpr int128::int128(uint128 v) + : lo_{Uint128Low64(v)}, hi_{static_cast(Uint128High64(v))} {} + +#elif defined(ABSL_IS_BIG_ENDIAN) + +constexpr int128::int128(int64_t high, uint64_t low) : hi_{high}, lo_{low} {} + +constexpr int128::int128(int v) + : hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast(v)} {} +constexpr int128::int128(long v) // NOLINT(runtime/int) + : hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast(v)} {} +constexpr int128::int128(long long v) // NOLINT(runtime/int) + : hi_{v < 0 ? 
~int64_t{0} : 0}, lo_{static_cast(v)} {} + +constexpr int128::int128(unsigned int v) : hi_{0}, lo_{v} {} +// NOLINTNEXTLINE(runtime/int) +constexpr int128::int128(unsigned long v) : hi_{0}, lo_{v} {} +// NOLINTNEXTLINE(runtime/int) +constexpr int128::int128(unsigned long long v) : hi_{0}, lo_{v} {} + +constexpr int128::int128(uint128 v) + : hi_{static_cast(Uint128High64(v))}, lo_{Uint128Low64(v)} {} + +#else // byte order +#error "Unsupported byte order: must be little-endian or big-endian." +#endif // byte order + +constexpr int128::operator bool() const { return lo_ || hi_; } + +constexpr int128::operator char() const { + // NOLINTNEXTLINE(runtime/int) + return static_cast(static_cast(*this)); +} + +constexpr int128::operator signed char() const { + // NOLINTNEXTLINE(runtime/int) + return static_cast(static_cast(*this)); +} + +constexpr int128::operator unsigned char() const { + return static_cast(lo_); +} + +constexpr int128::operator char16_t() const { + return static_cast(lo_); +} + +constexpr int128::operator char32_t() const { + return static_cast(lo_); +} + +constexpr int128::operator ABSL_INTERNAL_WCHAR_T() const { + // NOLINTNEXTLINE(runtime/int) + return static_cast(static_cast(*this)); +} + +constexpr int128::operator short() const { // NOLINT(runtime/int) + // NOLINTNEXTLINE(runtime/int) + return static_cast(static_cast(*this)); +} + +constexpr int128::operator unsigned short() const { // NOLINT(runtime/int) + return static_cast(lo_); // NOLINT(runtime/int) +} + +constexpr int128::operator int() const { + // NOLINTNEXTLINE(runtime/int) + return static_cast(static_cast(*this)); +} + +constexpr int128::operator unsigned int() const { + return static_cast(lo_); +} + +constexpr int128::operator long() const { // NOLINT(runtime/int) + // NOLINTNEXTLINE(runtime/int) + return static_cast(static_cast(*this)); +} + +constexpr int128::operator unsigned long() const { // NOLINT(runtime/int) + return static_cast(lo_); // NOLINT(runtime/int) +} + +constexpr 
int128::operator long long() const { // NOLINT(runtime/int) + // We don't bother checking the value of hi_. If *this < 0, lo_'s high bit + // must be set in order for the value to fit into a long long. Conversely, if + // lo_'s high bit is set, *this must be < 0 for the value to fit. + return int128_internal::BitCastToSigned(lo_); +} + +constexpr int128::operator unsigned long long() const { // NOLINT(runtime/int) + return static_cast(lo_); // NOLINT(runtime/int) +} + +inline int128::operator float() const { + // We must convert the absolute value and then negate as needed, because + // floating point types are typically sign-magnitude. Otherwise, the + // difference between the high and low 64 bits when interpreted as two's + // complement overwhelms the precision of the mantissa. + // + // Also check to make sure we don't negate Int128Min() + return hi_ < 0 && *this != Int128Min() + ? -static_cast(-*this) + : static_cast(lo_) + + std::ldexp(static_cast(hi_), 64); +} + +inline int128::operator double() const { + // See comment in int128::operator float() above. + return hi_ < 0 && *this != Int128Min() + ? -static_cast(-*this) + : static_cast(lo_) + + std::ldexp(static_cast(hi_), 64); +} + +inline int128::operator long double() const { + // See comment in int128::operator float() above. + return hi_ < 0 && *this != Int128Min() + ? -static_cast(-*this) + : static_cast(lo_) + + std::ldexp(static_cast(hi_), 64); +} + +// Comparison operators. + +constexpr bool operator==(int128 lhs, int128 rhs) { + return (Int128Low64(lhs) == Int128Low64(rhs) && + Int128High64(lhs) == Int128High64(rhs)); +} + +constexpr bool operator!=(int128 lhs, int128 rhs) { return !(lhs == rhs); } + +constexpr bool operator<(int128 lhs, int128 rhs) { + return (Int128High64(lhs) == Int128High64(rhs)) + ? (Int128Low64(lhs) < Int128Low64(rhs)) + : (Int128High64(lhs) < Int128High64(rhs)); +} + +constexpr bool operator>(int128 lhs, int128 rhs) { + return (Int128High64(lhs) == Int128High64(rhs)) + ? 
(Int128Low64(lhs) > Int128Low64(rhs)) + : (Int128High64(lhs) > Int128High64(rhs)); +} + +constexpr bool operator<=(int128 lhs, int128 rhs) { return !(lhs > rhs); } + +constexpr bool operator>=(int128 lhs, int128 rhs) { return !(lhs < rhs); } + +// Unary operators. + +constexpr int128 operator-(int128 v) { + return MakeInt128(~Int128High64(v) + (Int128Low64(v) == 0), + ~Int128Low64(v) + 1); +} + +constexpr bool operator!(int128 v) { + return !Int128Low64(v) && !Int128High64(v); +} + +constexpr int128 operator~(int128 val) { + return MakeInt128(~Int128High64(val), ~Int128Low64(val)); +} + +// Arithmetic operators. + +namespace int128_internal { +constexpr int128 SignedAddResult(int128 result, int128 lhs) { + // check for carry + return (Int128Low64(result) < Int128Low64(lhs)) + ? MakeInt128(Int128High64(result) + 1, Int128Low64(result)) + : result; +} +} // namespace int128_internal +constexpr int128 operator+(int128 lhs, int128 rhs) { + return int128_internal::SignedAddResult( + MakeInt128(Int128High64(lhs) + Int128High64(rhs), + Int128Low64(lhs) + Int128Low64(rhs)), + lhs); +} + +namespace int128_internal { +constexpr int128 SignedSubstructResult(int128 result, int128 lhs, int128 rhs) { + // check for carry + return (Int128Low64(lhs) < Int128Low64(rhs)) + ? 
MakeInt128(Int128High64(result) - 1, Int128Low64(result)) + : result; +} +} // namespace int128_internal +constexpr int128 operator-(int128 lhs, int128 rhs) { + return int128_internal::SignedSubstructResult( + MakeInt128(Int128High64(lhs) - Int128High64(rhs), + Int128Low64(lhs) - Int128Low64(rhs)), + lhs, rhs); +} + +inline int128 operator*(int128 lhs, int128 rhs) { + return MakeInt128( + int128_internal::BitCastToSigned(Uint128High64(uint128(lhs) * rhs)), + Uint128Low64(uint128(lhs) * rhs)); +} + +inline int128 int128::operator++(int) { + int128 tmp(*this); + *this += 1; + return tmp; +} + +inline int128 int128::operator--(int) { + int128 tmp(*this); + *this -= 1; + return tmp; +} + +inline int128& int128::operator++() { + *this += 1; + return *this; +} + +inline int128& int128::operator--() { + *this -= 1; + return *this; +} + +constexpr int128 operator|(int128 lhs, int128 rhs) { + return MakeInt128(Int128High64(lhs) | Int128High64(rhs), + Int128Low64(lhs) | Int128Low64(rhs)); +} + +constexpr int128 operator&(int128 lhs, int128 rhs) { + return MakeInt128(Int128High64(lhs) & Int128High64(rhs), + Int128Low64(lhs) & Int128Low64(rhs)); +} + +constexpr int128 operator^(int128 lhs, int128 rhs) { + return MakeInt128(Int128High64(lhs) ^ Int128High64(rhs), + Int128Low64(lhs) ^ Int128Low64(rhs)); +} + +constexpr int128 operator<<(int128 lhs, int amount) { + // int64_t shifts of >= 63 are undefined, so we need some special-casing. 
+ assert(amount >= 0 && amount < 127); + if (amount <= 0) { + return lhs; + } else if (amount < 63) { + return MakeInt128( + (Int128High64(lhs) << amount) | + static_cast(Int128Low64(lhs) >> (64 - amount)), + Int128Low64(lhs) << amount); + } else if (amount == 63) { + return MakeInt128(((Int128High64(lhs) << 32) << 31) | + static_cast(Int128Low64(lhs) >> 1), + (Int128Low64(lhs) << 32) << 31); + } else if (amount == 127) { + return MakeInt128(static_cast(Int128Low64(lhs) << 63), 0); + } else if (amount > 127) { + return MakeInt128(0, 0); + } else { + // amount >= 64 && amount < 127 + return MakeInt128(static_cast(Int128Low64(lhs) << (amount - 64)), + 0); + } +} + +constexpr int128 operator>>(int128 lhs, int amount) { + // int64_t shifts of >= 63 are undefined, so we need some special-casing. + assert(amount >= 0 && amount < 127); + if (amount <= 0) { + return lhs; + } else if (amount < 63) { + return MakeInt128( + Int128High64(lhs) >> amount, + Int128Low64(lhs) >> amount | static_cast(Int128High64(lhs)) + << (64 - amount)); + } else if (amount == 63) { + return MakeInt128((Int128High64(lhs) >> 32) >> 31, + static_cast(Int128High64(lhs) << 1) | + (Int128Low64(lhs) >> 32) >> 31); + + } else if (amount >= 127) { + return MakeInt128((Int128High64(lhs) >> 32) >> 31, + static_cast((Int128High64(lhs) >> 32) >> 31)); + } else { + // amount >= 64 && amount < 127 + return MakeInt128( + (Int128High64(lhs) >> 32) >> 31, + static_cast(Int128High64(lhs) >> (amount - 64))); + } +} diff --git a/CAPI/cpp/grpc/include/absl/numeric/internal/bits.h b/CAPI/cpp/grpc/include/absl/numeric/internal/bits.h new file mode 100644 index 00000000..15a43ced --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/numeric/internal/bits.h @@ -0,0 +1,385 @@ +// Copyright 2020 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
+#define ABSL_NUMERIC_INTERNAL_BITS_H_
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+// Clang on Windows has __builtin_clzll; otherwise we need to use the
+// windows intrinsic functions.
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+#if defined(__GNUC__) && !defined(__clang__)
+// GCC
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
+#else
+#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
+#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CLZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
+#endif
+
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
+    ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
+#else
+#define ABSL_INTERNAL_CONSTEXPR_CTZ
+#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
+#endif
+
+namespace absl
+{
+    ABSL_NAMESPACE_BEGIN
+
+    namespace numeric_internal
+    {
+
+        constexpr bool IsPowerOf2(unsigned int x) noexcept
+        {
+            return x != 0 && (x & (x - 1)) == 0;
+        }
+
+        template <class T>
+        ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
+            T x, int s
+        ) noexcept
+        {
+            static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+            static_assert(IsPowerOf2(std::numeric_limits<T>::digits), "T must have a power-of-2 size");
+
+            return static_cast<T>(x >> (s & (std::numeric_limits<T>::digits - 1))) |
+                   static_cast<T>(x << ((-s) & (std::numeric_limits<T>::digits - 1)));
+        }
+
+        template <class T>
+        ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
+            T x, int s
+        ) noexcept
+        {
+            static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+            static_assert(IsPowerOf2(std::numeric_limits<T>::digits), "T must have a power-of-2 size");
+
+            return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
+                   static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+        Popcount32(uint32_t x) noexcept
+        {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
+            static_assert(sizeof(unsigned int) == sizeof(x), "__builtin_popcount does not take 32-bit arg");
+            return __builtin_popcount(x);
+#else
+            x -= ((x >> 1) & 0x55555555);
+            x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
+            return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
+#endif
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+        Popcount64(uint64_t x) noexcept
+        {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
+            static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+                          "__builtin_popcount does not take 64-bit arg");
+            return __builtin_popcountll(x);
+#else
+            x -= (x >> 1) & 0x5555555555555555ULL;
+            x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
+            return static_cast<int>(
+                (((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) *
+                 0x101010101010101ULL) >> 56
+            );
+#endif
+        }
+
+        template <class T>
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
+        Popcount(T x) noexcept
+        {
+            static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+            static_assert(IsPowerOf2(std::numeric_limits<T>::digits), "T must have a power-of-2 size");
+            static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
+            return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+        CountLeadingZeroes32(uint32_t x)
+        {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
+            // Use __builtin_clz, which uses the following instructions:
+            //  x86: bsr, lzcnt
+            //  ARM64: clz
+            //  PPC: cntlzd
+
+            static_assert(sizeof(unsigned int) == sizeof(x), "__builtin_clz does not take 32-bit arg");
+            // Handle 0 as a special case because __builtin_clz(0) is undefined.
+            return x == 0 ? 32 : __builtin_clz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+            unsigned long result = 0; // NOLINT(runtime/int)
+            if (_BitScanReverse(&result, x))
+            {
+                return 31 - result;
+            }
+            return 32;
+#else
+            int zeroes = 28;
+            if (x >> 16)
+            {
+                zeroes -= 16;
+                x >>= 16;
+            }
+            if (x >> 8)
+            {
+                zeroes -= 8;
+                x >>= 8;
+            }
+            if (x >> 4)
+            {
+                zeroes -= 4;
+                x >>= 4;
+            }
+            return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+        CountLeadingZeroes16(uint16_t x)
+        {
+#if ABSL_HAVE_BUILTIN(__builtin_clzs)
+            static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
+                          "__builtin_clzs does not take 16-bit arg");
+            return x == 0 ?
+                16 :
+                __builtin_clzs(x);
+#else
+            return CountLeadingZeroes32(x) - 16;
+#endif
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+        CountLeadingZeroes64(uint64_t x)
+        {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
+            // Use __builtin_clzll, which uses the following instructions:
+            //  x86: bsr, lzcnt
+            //  ARM64: clz
+            //  PPC: cntlzd
+            static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+                          "__builtin_clzll does not take 64-bit arg");
+
+            // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+            return x == 0 ? 64 : __builtin_clzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+    (defined(_M_X64) || defined(_M_ARM64))
+            // MSVC does not have __buitin_clzll. Use _BitScanReverse64.
+            unsigned long result = 0; // NOLINT(runtime/int)
+            if (_BitScanReverse64(&result, x))
+            {
+                return 63 - result;
+            }
+            return 64;
+#elif defined(_MSC_VER) && !defined(__clang__)
+            // MSVC does not have __buitin_clzll. Compose two calls to _BitScanReverse
+            unsigned long result = 0; // NOLINT(runtime/int)
+            if ((x >> 32) &&
+                _BitScanReverse(&result, static_cast<unsigned long>(x >> 32)))
+            {
+                return 31 - result;
+            }
+            if (_BitScanReverse(&result, static_cast<unsigned long>(x)))
+            {
+                return 63 - result;
+            }
+            return 64;
+#else
+            int zeroes = 60;
+            if (x >> 32)
+            {
+                zeroes -= 32;
+                x >>= 32;
+            }
+            if (x >> 16)
+            {
+                zeroes -= 16;
+                x >>= 16;
+            }
+            if (x >> 8)
+            {
+                zeroes -= 8;
+                x >>= 8;
+            }
+            if (x >> 4)
+            {
+                zeroes -= 4;
+                x >>= 4;
+            }
+            return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
+#endif
+        }
+
+        template <typename T>
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
+        CountLeadingZeroes(T x)
+        {
+            static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+            static_assert(IsPowerOf2(std::numeric_limits<T>::digits), "T must have a power-of-2 size");
+            static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
+            return sizeof(T) <= sizeof(uint16_t) ?
+                CountLeadingZeroes16(static_cast<uint16_t>(x)) -
+                    (std::numeric_limits<uint16_t>::digits -
+                     std::numeric_limits<T>::digits) :
+                (sizeof(T) <= sizeof(uint32_t) ? CountLeadingZeroes32(static_cast<uint32_t>(x)) -
+                                                     (std::numeric_limits<uint32_t>::digits -
+                                                      std::numeric_limits<T>::digits) :
+                                                 CountLeadingZeroes64(x));
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+        CountTrailingZeroesNonzero32(uint32_t x)
+        {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
+            static_assert(sizeof(unsigned int) == sizeof(x), "__builtin_ctz does not take 32-bit arg");
+            return __builtin_ctz(x);
+#elif defined(_MSC_VER) && !defined(__clang__)
+            unsigned long result = 0; // NOLINT(runtime/int)
+            _BitScanForward(&result, x);
+            return result;
+#else
+            int c = 31;
+            x &= ~x + 1;
+            if (x & 0x0000FFFF)
+                c -= 16;
+            if (x & 0x00FF00FF)
+                c -= 8;
+            if (x & 0x0F0F0F0F)
+                c -= 4;
+            if (x & 0x33333333)
+                c -= 2;
+            if (x & 0x55555555)
+                c -= 1;
+            return c;
+#endif
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+        CountTrailingZeroesNonzero64(uint64_t x)
+        {
+#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
+            static_assert(sizeof(unsigned long long) == sizeof(x), // NOLINT(runtime/int)
+                          "__builtin_ctzll does not take 64-bit arg");
+            return __builtin_ctzll(x);
+#elif defined(_MSC_VER) && !defined(__clang__) && \
+    (defined(_M_X64) || defined(_M_ARM64))
+            unsigned long result = 0; // NOLINT(runtime/int)
+            _BitScanForward64(&result, x);
+            return result;
+#elif defined(_MSC_VER) && !defined(__clang__)
+            unsigned long result = 0; // NOLINT(runtime/int)
+            if (static_cast<uint32_t>(x) == 0)
+            {
+                _BitScanForward(&result, static_cast<unsigned long>(x >> 32));
+                return result + 32;
+            }
+            _BitScanForward(&result, static_cast<unsigned long>(x));
+            return result;
+#else
+            int c = 63;
+            x &= ~x + 1;
+            if (x & 0x00000000FFFFFFFF)
+                c -= 32;
+            if (x & 0x0000FFFF0000FFFF)
+                c -= 16;
+            if (x & 0x00FF00FF00FF00FF)
+                c -= 8;
+            if (x & 0x0F0F0F0F0F0F0F0F)
+                c -= 4;
+            if (x & 0x3333333333333333)
+                c -= 2;
+            if (x & 0x5555555555555555)
+                c -= 1;
+            return c;
+#endif
+        }
+
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+        CountTrailingZeroesNonzero16(uint16_t x)
+        {
+#if ABSL_HAVE_BUILTIN(__builtin_ctzs)
+            static_assert(sizeof(unsigned short) == sizeof(x), // NOLINT(runtime/int)
+                          "__builtin_ctzs does not take 16-bit arg");
+            return __builtin_ctzs(x);
+#else
+            return CountTrailingZeroesNonzero32(x);
+#endif
+        }
+
+        template <typename T>
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
+        CountTrailingZeroes(T x) noexcept
+        {
+            static_assert(std::is_unsigned<T>::value, "T must be unsigned");
+            static_assert(IsPowerOf2(std::numeric_limits<T>::digits), "T must have a power-of-2 size");
+            static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
+            return x == 0 ? std::numeric_limits<T>::digits : (sizeof(T) <= sizeof(uint16_t) ? CountTrailingZeroesNonzero16(static_cast<uint16_t>(x)) : (sizeof(T) <= sizeof(uint32_t) ? CountTrailingZeroesNonzero32(static_cast<uint32_t>(x)) : CountTrailingZeroesNonzero64(x)));
+        }
+
+        // If T is narrower than unsigned, T{1} << bit_width will be promoted. We
+        // want to force it to wraparound so that bit_ceil of an invalid value are not
+        // core constant expressions.
+        template <class T>
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+            typename std::enable_if<std::is_unsigned<T>::value, T>::type
+            BitCeilPromotionHelper(T x, T promotion)
+        {
+            return (T{1} << (x + promotion)) >> promotion;
+        }
+
+        template <class T>
+        ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
+            typename std::enable_if<std::is_unsigned<T>::value, T>::type
+            BitCeilNonPowerOf2(T x)
+        {
+            // If T is narrower than unsigned, it undergoes promotion to unsigned when we
+            // shift. We calculate the number of bits added by the wider type.
+            return BitCeilPromotionHelper(
+                static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
+                T{sizeof(T) >= sizeof(unsigned) ?
+                      0 :
+                      std::numeric_limits<unsigned>::digits - std::numeric_limits<T>::digits}
+            );
+        }
+
+    } // namespace numeric_internal
+    ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_NUMERIC_INTERNAL_BITS_H_
diff --git a/CAPI/cpp/grpc/include/absl/numeric/internal/representation.h b/CAPI/cpp/grpc/include/absl/numeric/internal/representation.h
new file mode 100644
index 00000000..22a17a75
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/numeric/internal/representation.h
@@ -0,0 +1,58 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+#define ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
+
+#include <limits>
+
+#include "absl/base/config.h"
+
+namespace absl
+{
+    ABSL_NAMESPACE_BEGIN
+    namespace numeric_internal
+    {
+
+        // Returns true iff long double is represented as a pair of doubles added
+        // together.
+        inline constexpr bool IsDoubleDouble()
+        {
+            // A double-double value always has exactly twice the precision of a double
+            // value--one double carries the high digits and one double carries the low
+            // digits. This property is not shared with any other common floating-point
+            // representation, so this test won't trigger false positives. For reference,
+            // this table gives the number of bits of precision of each common
+            // floating-point representation:
+            //
+            //   type             precision
+            //   IEEE single      24 b
+            //   IEEE double      53
+            //   x86 long double  64
+            //   double-double    106
+            //   IEEE quadruple   113
+            //
+            // Note in particular that a quadruple-precision float has greater precision
+            // than a double-double float despite taking up the same amount of memory; the
+            // quad has more of its bits allocated to the mantissa than the double-double
+            // has.
+            return std::numeric_limits<long double>::digits ==
+                   2 * std::numeric_limits<double>::digits;
+        }
+
+    } // namespace numeric_internal
+    ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_
diff --git a/CAPI/cpp/grpc/include/absl/profiling/internal/exponential_biased.h b/CAPI/cpp/grpc/include/absl/profiling/internal/exponential_biased.h
new file mode 100644
index 00000000..a9eb04c0
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/profiling/internal/exponential_biased.h
@@ -0,0 +1,134 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
+#define ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
+
+#include <stdint.h>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+
+namespace absl
+{
+    ABSL_NAMESPACE_BEGIN
+    namespace profiling_internal
+    {
+
+        // ExponentialBiased provides a small and fast random number generator for a
+        // rounded exponential distribution. This generator manages very little state,
+        // and imposes no synchronization overhead. This makes it useful in specialized
+        // scenarios requiring minimum overhead, such as stride based periodic sampling.
+        //
+        // ExponentialBiased provides two closely related functions, GetSkipCount() and
+        // GetStride(), both returning a rounded integer defining a number of events
+        // required before some event with a given mean probability occurs.
+        //
+        // The distribution is useful to generate a random wait time or some periodic
+        // event with a given mean probability. For example, if an action is supposed to
+        // happen on average once every 'N' events, then we can get a random 'stride'
+        // counting down how long before the event to happen. For example, if we'd want
+        // to sample one in every 1000 'Frobber' calls, our code could look like this:
+        //
+        //   Frobber::Frobber() {
+        //     stride_ = exponential_biased_.GetStride(1000);
+        //   }
+        //
+        //   void Frobber::Frob(int arg) {
+        //     if (--stride == 0) {
+        //       SampleFrob(arg);
+        //       stride_ = exponential_biased_.GetStride(1000);
+        //     }
+        //     ...
+        //   }
+        //
+        // The rounding of the return value creates a bias, especially for smaller means
+        // where the distribution of the fraction is not evenly distributed. We correct
+        // this bias by tracking the fraction we rounded up or down on each iteration,
+        // effectively tracking the distance between the cumulative value, and the
+        // rounded cumulative value. For example, given a mean of 2:
+        //
+        //   raw = 1.63076, cumulative = 1.63076, rounded = 2, bias = -0.36923
+        //   raw = 0.14624, cumulative = 1.77701, rounded = 2, bias = 0.14624
+        //   raw = 4.93194, cumulative = 6.70895, rounded = 7, bias = -0.06805
+        //   raw = 0.24206, cumulative = 6.95101, rounded = 7, bias = 0.24206
+        //   etc...
+        //
+        // Adjusting with rounding bias is relatively trivial:
+        //
+        //   double value = bias_ + exponential_distribution(mean)();
+        //   double rounded_value = std::rint(value);
+        //   bias_ = value - rounded_value;
+        //   return rounded_value;
+        //
+        // This class is thread-compatible.
+        class ExponentialBiased
+        {
+        public:
+            // The number of bits set by NextRandom.
+            static constexpr int kPrngNumBits = 48;
+
+            // `GetSkipCount()` returns the number of events to skip before some chosen
+            // event happens. For example, randomly tossing a coin, we will on average
+            // throw heads once before we get tails. We can simulate random coin tosses
+            // using GetSkipCount() as:
+            //
+            //   ExponentialBiased eb;
+            //   for (...) {
+            //     int number_of_heads_before_tail = eb.GetSkipCount(1);
+            //     for (int flips = 0; flips < number_of_heads_before_tail; ++flips) {
+            //       printf("head...");
+            //     }
+            //     printf("tail\n");
+            //   }
+            //
+            int64_t GetSkipCount(int64_t mean);
+
+            // GetStride() returns the number of events required for a specific event to
+            // happen. See the class comments for a usage example. `GetStride()` is
+            // equivalent to `GetSkipCount(mean - 1) + 1`. When to use `GetStride()` or
+            // `GetSkipCount()` depends mostly on what best fits the use case.
+            int64_t GetStride(int64_t mean);
+
+            // Computes a random number in the range [0, 1<<(kPrngNumBits+1) - 1]
+            //
+            // This is public to enable testing.
+            static uint64_t NextRandom(uint64_t rnd);
+
+        private:
+            void Initialize();
+
+            uint64_t rng_{0};
+            double bias_{0};
+            bool initialized_{false};
+        };
+
+        // Returns the next prng value.
+        // pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48
+        // This is the lrand64 generator.
+        inline uint64_t ExponentialBiased::NextRandom(uint64_t rnd)
+        {
+            const uint64_t prng_mult = uint64_t{0x5DEECE66D};
+            const uint64_t prng_add = 0xB;
+            const uint64_t prng_mod_power = 48;
+            const uint64_t prng_mod_mask =
+                ~((~static_cast<uint64_t>(0)) << prng_mod_power);
+            return (prng_mult * rnd + prng_add) & prng_mod_mask;
+        }
+
+    } // namespace profiling_internal
+    ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_PROFILING_INTERNAL_EXPONENTIAL_BIASED_H_
diff --git a/CAPI/cpp/grpc/include/absl/profiling/internal/periodic_sampler.h b/CAPI/cpp/grpc/include/absl/profiling/internal/periodic_sampler.h
new file mode 100644
index 00000000..22638da1
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/profiling/internal/periodic_sampler.h
@@ -0,0 +1,219 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
+#define ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "absl/base/optimization.h"
+#include "absl/profiling/internal/exponential_biased.h"
+
+namespace absl
+{
+    ABSL_NAMESPACE_BEGIN
+    namespace profiling_internal
+    {
+
+        // PeriodicSamplerBase provides the basic period sampler implementation.
+ // + // This is the base class for the templated PeriodicSampler class, which holds + // a global std::atomic value identified by a user defined tag, such that + // each specific PeriodSampler implementation holds its own global period. + // + // PeriodicSamplerBase is thread-compatible except where stated otherwise. + class PeriodicSamplerBase + { + public: + // PeriodicSamplerBase is trivial / copyable / movable / destructible. + PeriodicSamplerBase() = default; + PeriodicSamplerBase(PeriodicSamplerBase&&) = default; + PeriodicSamplerBase(const PeriodicSamplerBase&) = default; + + // Returns true roughly once every `period` calls. This is established by a + // randomly picked `stride` that is counted down on each call to `Sample`. + // This stride is picked such that the probability of `Sample()` returning + // true is 1 in `period`. + inline bool Sample() noexcept; + + // The below methods are intended for optimized use cases where the + // size of the inlined fast path code is highly important. Applications + // should use the `Sample()` method unless they have proof that their + // specific use case requires the optimizations offered by these methods. + // + // An example of such a use case is SwissTable sampling. All sampling checks + // are in inlined SwissTable methods, and the number of call sites is huge. + // In this case, the inlined code size added to each translation unit calling + // SwissTable methods is non-trivial. + // + // The `SubtleMaybeSample()` function spuriously returns true even if the + // function should not be sampled, applications MUST match each call to + // 'SubtleMaybeSample()' returning true with a `SubtleConfirmSample()` call, + // and use the result of the latter as the sampling decision. 
+            // In other words: the code should logically be equivalent to:
+            //
+            //    if (SubtleMaybeSample() && SubtleConfirmSample()) {
+            //      // Sample this call
+            //    }
+            //
+            // In the 'inline-size' optimized case, the `SubtleConfirmSample()` call can
+            // be placed out of line, for example, the typical use case looks as follows:
+            //
+            //   // --- frobber.h -----------
+            //   void FrobberSampled();
+            //
+            //   inline void FrobberImpl() {
+            //     // ...
+            //   }
+            //
+            //   inline void Frobber() {
+            //     if (ABSL_PREDICT_FALSE(sampler.SubtleMaybeSample())) {
+            //       FrobberSampled();
+            //     } else {
+            //       FrobberImpl();
+            //     }
+            //   }
+            //
+            //   // --- frobber.cc -----------
+            //   void FrobberSampled() {
+            //     if (!sampler.SubtleConfirmSample())) {
+            //       // Spurious false positive
+            //       FrobberImpl();
+            //       return;
+            //     }
+            //
+            //     // Sampled execution
+            //     // ...
+            //   }
+            inline bool SubtleMaybeSample() noexcept;
+            bool SubtleConfirmSample() noexcept;
+
+        protected:
+            // We explicitly don't use a virtual destructor as this class is never
+            // virtually destroyed, and it keeps the class trivial, which avoids TLS
+            // prologue and epilogue code for our TLS instances.
+            ~PeriodicSamplerBase() = default;
+
+            // Returns the next stride for our sampler.
+            // This function is virtual for testing purposes only.
+            virtual int64_t GetExponentialBiased(int period) noexcept;
+
+        private:
+            // Returns the current period of this sampler. Thread-safe.
+            virtual int period() const noexcept = 0;
+
+            // Keep and decrement stride_ as an unsigned integer, but compare the value
+            // to zero casted as a signed int. clang and msvc do not create optimum code
+            // if we use signed for the combined decrement and sign comparison.
+            //
+            // Below 3 alternative options, all compiles generate the best code
+            // using the unsigned increment <---> signed int comparison option.
+            //
+            // Option 1:
+            //   int64_t stride_;
+            //   if (ABSL_PREDICT_TRUE(++stride_ < 0)) { ... }
+            //
+            //   GCC   x64 (OK) : https://gcc.godbolt.org/z/R5MzzA
+            //   GCC   ppc (OK) : https://gcc.godbolt.org/z/z7NZAt
+            //   Clang x64 (BAD): https://gcc.godbolt.org/z/t4gPsd
+            //   ICC   x64 (OK) : https://gcc.godbolt.org/z/rE6s8W
+            //   MSVC  x64 (OK) : https://gcc.godbolt.org/z/ARMXqS
+            //
+            // Option 2:
+            //   int64_t stride_ = 0;
+            //   if (ABSL_PREDICT_TRUE(--stride_ >= 0)) { ... }
+            //
+            //   GCC   x64 (OK) : https://gcc.godbolt.org/z/jSQxYK
+            //   GCC   ppc (OK) : https://gcc.godbolt.org/z/VJdYaA
+            //   Clang x64 (BAD): https://gcc.godbolt.org/z/Xm4NjX
+            //   ICC   x64 (OK) : https://gcc.godbolt.org/z/4snaFd
+            //   MSVC  x64 (BAD): https://gcc.godbolt.org/z/BgnEKE
+            //
+            // Option 3:
+            //   uint64_t stride_;
+            //   if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0)) { ... }
+            //
+            //   GCC   x64 (OK) : https://gcc.godbolt.org/z/bFbfPy
+            //   GCC   ppc (OK) : https://gcc.godbolt.org/z/S9KkUE
+            //   Clang x64 (OK) : https://gcc.godbolt.org/z/UYzRb4
+            //   ICC   x64 (OK) : https://gcc.godbolt.org/z/ptTNfD
+            //   MSVC  x64 (OK) : https://gcc.godbolt.org/z/76j4-5
+            uint64_t stride_ = 0;
+            absl::profiling_internal::ExponentialBiased rng_;
+        };
+
+        inline bool PeriodicSamplerBase::SubtleMaybeSample() noexcept
+        {
+            // See comments on `stride_` for the unsigned increment / signed compare.
+            if (ABSL_PREDICT_TRUE(static_cast<int64_t>(++stride_) < 0))
+            {
+                return false;
+            }
+            return true;
+        }
+
+        inline bool PeriodicSamplerBase::Sample() noexcept
+        {
+            return ABSL_PREDICT_FALSE(SubtleMaybeSample()) ? SubtleConfirmSample() : false;
+        }
+
+        // PeriodicSampler is a concreted periodic sampler implementation.
+        // The user provided Tag identifies the implementation, and is required to
+        // isolate the global state of this instance from other instances.
+        //
+        // Typical use case:
+        //
+        //   struct HashTablezTag {};
+        //   thread_local PeriodicSampler<HashTablezTag, 100> sampler;
+        //
+        //   void HashTableSamplingLogic(...) {
+        //     if (sampler.Sample()) {
+        //       HashTableSlowSamplePath(...);
+        //     }
+        //   }
+        //
+        template <typename Tag, int default_period = 0>
+        class PeriodicSampler final : public PeriodicSamplerBase
+        {
+        public:
+            ~PeriodicSampler() = default;
+
+            int period() const noexcept final
+            {
+                return period_.load(std::memory_order_relaxed);
+            }
+
+            // Sets the global period for this sampler. Thread-safe.
+            // Setting a period of 0 disables the sampler, i.e., every call to Sample()
+            // will return false. Setting a period of 1 puts the sampler in 'always on'
+            // mode, i.e., every call to Sample() returns true.
+            static void SetGlobalPeriod(int period)
+            {
+                period_.store(period, std::memory_order_relaxed);
+            }
+
+        private:
+            static std::atomic<int> period_;
+        };
+
+        template <typename Tag, int default_period>
+        std::atomic<int> PeriodicSampler<Tag, default_period>::period_(default_period);
+
+    } // namespace profiling_internal
+    ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_PROFILING_INTERNAL_PERIODIC_SAMPLER_H_
diff --git a/CAPI/cpp/grpc/include/absl/profiling/internal/sample_recorder.h b/CAPI/cpp/grpc/include/absl/profiling/internal/sample_recorder.h
new file mode 100644
index 00000000..b4945b22
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/profiling/internal/sample_recorder.h
@@ -0,0 +1,278 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +// ----------------------------------------------------------------------------- +// File: sample_recorder.h +// ----------------------------------------------------------------------------- +// +// This header file defines a lock-free linked list for recording samples +// collected from a random/stochastic process. +// +// This utility is internal-only. Use at your own risk. + +#ifndef ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_ +#define ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" +#include "absl/time/time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace profiling_internal + { + + // Sample that has members required for linking samples in the linked list of + // samples maintained by the SampleRecorder. Type T defines the sampled data. + template + struct Sample + { + // Guards the ability to restore the sample to a pristine state. This + // prevents races with sampling and resurrecting an object. + absl::Mutex init_mu; + T* next = nullptr; + T* dead ABSL_GUARDED_BY(init_mu) = nullptr; + int64_t weight; // How many sampling events were required to sample this one. + }; + + // Holds samples and their associated stack traces with a soft limit of + // `SetHashtablezMaxSamples()`. + // + // Thread safe. + template + class SampleRecorder + { + public: + SampleRecorder(); + ~SampleRecorder(); + + // Registers for sampling. Returns an opaque registration info. + template + T* Register(Targs&&... args); + + // Unregisters the sample. + void Unregister(T* sample); + + // The dispose callback will be called on all samples the moment they are + // being unregistered. Only affects samples that are unregistered after the + // callback has been set. + // Returns the previous callback. 
+ using DisposeCallback = void (*)(const T&); + DisposeCallback SetDisposeCallback(DisposeCallback f); + + // Iterates over all the registered `StackInfo`s. Returning the number of + // samples that have been dropped. + int64_t Iterate(const std::function& f); + + size_t GetMaxSamples() const; + void SetMaxSamples(size_t max); + + private: + void PushNew(T* sample); + void PushDead(T* sample); + template + T* PopDead(Targs... args); + + std::atomic dropped_samples_; + std::atomic size_estimate_; + std::atomic max_samples_{1 << 20}; + + // Intrusive lock free linked lists for tracking samples. + // + // `all_` records all samples (they are never removed from this list) and is + // terminated with a `nullptr`. + // + // `graveyard_.dead` is a circular linked list. When it is empty, + // `graveyard_.dead == &graveyard`. The list is circular so that + // every item on it (even the last) has a non-null dead pointer. This allows + // `Iterate` to determine if a given sample is live or dead using only + // information on the sample itself. 
+ // + // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead + // looks like this (G is the Graveyard): + // + // +---+ +---+ +---+ +---+ +---+ + // all -->| A |--->| B |--->| C |--->| D |--->| E | + // | | | | | | | | | | + // +---+ | | +->| |-+ | | +->| |-+ | | + // | G | +---+ | +---+ | +---+ | +---+ | +---+ + // | | | | | | + // | | --------+ +--------+ | + // +---+ | + // ^ | + // +--------------------------------------+ + // + std::atomic all_; + T graveyard_; + + std::atomic dispose_; + }; + + template + typename SampleRecorder::DisposeCallback + SampleRecorder::SetDisposeCallback(DisposeCallback f) + { + return dispose_.exchange(f, std::memory_order_relaxed); + } + + template + SampleRecorder::SampleRecorder() : + dropped_samples_(0), + size_estimate_(0), + all_(nullptr), + dispose_(nullptr) + { + absl::MutexLock l(&graveyard_.init_mu); + graveyard_.dead = &graveyard_; + } + + template + SampleRecorder::~SampleRecorder() + { + T* s = all_.load(std::memory_order_acquire); + while (s != nullptr) + { + T* next = s->next; + delete s; + s = next; + } + } + + template + void SampleRecorder::PushNew(T* sample) + { + sample->next = all_.load(std::memory_order_relaxed); + while (!all_.compare_exchange_weak(sample->next, sample, std::memory_order_release, std::memory_order_relaxed)) + { + } + } + + template + void SampleRecorder::PushDead(T* sample) + { + if (auto* dispose = dispose_.load(std::memory_order_relaxed)) + { + dispose(*sample); + } + + absl::MutexLock graveyard_lock(&graveyard_.init_mu); + absl::MutexLock sample_lock(&sample->init_mu); + sample->dead = graveyard_.dead; + graveyard_.dead = sample; + } + + template + template + T* SampleRecorder::PopDead(Targs... args) + { + absl::MutexLock graveyard_lock(&graveyard_.init_mu); + + // The list is circular, so eventually it collapses down to + // graveyard_.dead == &graveyard_ + // when it is empty. 
+ T* sample = graveyard_.dead; + if (sample == &graveyard_) + return nullptr; + + absl::MutexLock sample_lock(&sample->init_mu); + graveyard_.dead = sample->dead; + sample->dead = nullptr; + sample->PrepareForSampling(std::forward(args)...); + return sample; + } + + template + template + T* SampleRecorder::Register(Targs&&... args) + { + size_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed); + if (size > max_samples_.load(std::memory_order_relaxed)) + { + size_estimate_.fetch_sub(1, std::memory_order_relaxed); + dropped_samples_.fetch_add(1, std::memory_order_relaxed); + return nullptr; + } + + T* sample = PopDead(args...); + if (sample == nullptr) + { + // Resurrection failed. Hire a new warlock. + sample = new T(); + { + absl::MutexLock sample_lock(&sample->init_mu); + // If flag initialization happens to occur (perhaps in another thread) + // while in this block, it will lock `graveyard_` which is usually always + // locked before any sample. This will appear as a lock inversion. + // However, this code is run exactly once per sample, and this sample + // cannot be accessed until after it is returned from this method. This + // means that this lock state can never be recreated, so we can safely + // inform the deadlock detector to ignore it. 
+ sample->init_mu.ForgetDeadlockInfo(); + sample->PrepareForSampling(std::forward(args)...); + } + PushNew(sample); + } + + return sample; + } + + template + void SampleRecorder::Unregister(T* sample) + { + PushDead(sample); + size_estimate_.fetch_sub(1, std::memory_order_relaxed); + } + + template + int64_t SampleRecorder::Iterate( + const std::function& f + ) + { + T* s = all_.load(std::memory_order_acquire); + while (s != nullptr) + { + absl::MutexLock l(&s->init_mu); + if (s->dead == nullptr) + { + f(*s); + } + s = s->next; + } + + return dropped_samples_.load(std::memory_order_relaxed); + } + + template + void SampleRecorder::SetMaxSamples(size_t max) + { + max_samples_.store(max, std::memory_order_release); + } + + template + size_t SampleRecorder::GetMaxSamples() const + { + return max_samples_.load(std::memory_order_acquire); + } + + } // namespace profiling_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/bernoulli_distribution.h b/CAPI/cpp/grpc/include/absl/random/bernoulli_distribution.h new file mode 100644 index 00000000..3b85278d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/bernoulli_distribution.h @@ -0,0 +1,246 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_BERNOULLI_DISTRIBUTION_H_ +#define ABSL_RANDOM_BERNOULLI_DISTRIBUTION_H_ + +#include +#include +#include + +#include "absl/base/optimization.h" +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/random/internal/iostream_state_saver.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::bernoulli_distribution is a drop in replacement for + // std::bernoulli_distribution. It guarantees that (given a perfect + // UniformRandomBitGenerator) the acceptance probability is *exactly* equal to + // the given double. + // + // The implementation assumes that double is IEEE754 + class bernoulli_distribution + { + public: + using result_type = bool; + + class param_type + { + public: + using distribution_type = bernoulli_distribution; + + explicit param_type(double p = 0.5) : + prob_(p) + { + assert(p >= 0.0 && p <= 1.0); + } + + double p() const + { + return prob_; + } + + friend bool operator==(const param_type& p1, const param_type& p2) + { + return p1.p() == p2.p(); + } + friend bool operator!=(const param_type& p1, const param_type& p2) + { + return p1.p() != p2.p(); + } + + private: + double prob_; + }; + + bernoulli_distribution() : + bernoulli_distribution(0.5) + { + } + + explicit bernoulli_distribution(double p) : + param_(p) + { + } + + explicit bernoulli_distribution(param_type p) : + param_(p) + { + } + + // no-op + void reset() + { + } + + template + bool operator()(URBG& g) + { // NOLINT(runtime/references) + return Generate(param_.p(), g); + } + + template + bool operator()(URBG& g, // NOLINT(runtime/references) + const param_type& param) + { + return Generate(param.p(), g); + } + + param_type param() const + { + return param_; + } + void param(const param_type& param) + { + param_ = param; + } + + double p() const + { + return param_.p(); + } + + result_type(min)() const + { + return false; + } + result_type(max)() const + { + return true; + } + + friend bool operator==(const bernoulli_distribution& d1, const 
bernoulli_distribution& d2) + { + return d1.param_ == d2.param_; + } + + friend bool operator!=(const bernoulli_distribution& d1, const bernoulli_distribution& d2) + { + return d1.param_ != d2.param_; + } + + private: + static constexpr uint64_t kP32 = static_cast(1) << 32; + + template + static bool Generate(double p, URBG& g); // NOLINT(runtime/references) + + param_type param_; + }; + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const bernoulli_distribution& x + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + os.precision(random_internal::stream_precision_helper::kPrecision); + os << x.p(); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + bernoulli_distribution& x + ) + { // NOLINT(runtime/references) + auto saver = random_internal::make_istream_state_saver(is); + auto p = random_internal::read_floating_point(is); + if (!is.fail()) + { + x.param(bernoulli_distribution::param_type(p)); + } + return is; + } + + template + bool bernoulli_distribution::Generate(double p, URBG& g) + { // NOLINT(runtime/references) + random_internal::FastUniformBits fast_u32; + + while (true) + { + // There are two aspects of the definition of `c` below that are worth + // commenting on. First, because `p` is in the range [0, 1], `c` is in the + // range [0, 2^32] which does not fit in a uint32_t and therefore requires + // 64 bits. + // + // Second, `c` is constructed by first casting explicitly to a signed + // integer and then casting explicitly to an unsigned integer of the same + // size. This is done because the hardware conversion instructions produce + // signed integers from double; if taken as a uint64_t the conversion would + // be wrong for doubles greater than 2^63 (not relevant in this use-case). 
+ // If converted directly to an unsigned integer, the compiler would end up + // emitting code to handle such large values that are not relevant due to + // the known bounds on `c`. To avoid these extra instructions this + // implementation converts first to the signed type and then convert to + // unsigned (which is a no-op). + const uint64_t c = static_cast(static_cast(p * kP32)); + const uint32_t v = fast_u32(g); + // FAST PATH: this path fails with probability 1/2^32. Note that simply + // returning v <= c would approximate P very well (up to an absolute error + // of 1/2^32); the slow path (taken in that range of possible error, in the + // case of equality) eliminates the remaining error. + if (ABSL_PREDICT_TRUE(v != c)) + return v < c; + + // It is guaranteed that `q` is strictly less than 1, because if `q` were + // greater than or equal to 1, the same would be true for `p`. Certainly `p` + // cannot be greater than 1, and if `p == 1`, then the fast path would + // necessary have been taken already. + const double q = static_cast(c) / kP32; + + // The probability of acceptance on the fast path is `q` and so the + // probability of acceptance here should be `p - q`. + // + // Note that `q` is obtained from `p` via some shifts and conversions, the + // upshot of which is that `q` is simply `p` with some of the + // least-significant bits of its mantissa set to zero. This means that the + // difference `p - q` will not have any rounding errors. To see why, pretend + // that double has 10 bits of resolution and q is obtained from `p` in such + // a way that the 4 least-significant bits of its mantissa are set to zero. + // For example: + // p = 1.1100111011 * 2^-1 + // q = 1.1100110000 * 2^-1 + // p - q = 1.011 * 2^-8 + // The difference `p - q` has exactly the nonzero mantissa bits that were + // "lost" in `q` producing a number which is certainly representable in a + // double. 
+ const double left = p - q; + + // By construction, the probability of being on this slow path is 1/2^32, so + // P(accept in slow path) = P(accept| in slow path) * P(slow path), + // which means the probability of acceptance here is `1 / (left * kP32)`: + const double here = left * kP32; + + // The simplest way to compute the result of this trial is to repeat the + // whole algorithm with the new probability. This terminates because even + // given arbitrarily unfriendly "random" bits, each iteration either + // multiplies a tiny probability by 2^32 (if c == 0) or strips off some + // number of nonzero mantissa bits. That process is bounded. + if (here == 0) + return false; + p = here; + } + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_BERNOULLI_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/beta_distribution.h b/CAPI/cpp/grpc/include/absl/random/beta_distribution.h new file mode 100644 index 00000000..534b44d1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/beta_distribution.h @@ -0,0 +1,511 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_BETA_DISTRIBUTION_H_ +#define ABSL_RANDOM_BETA_DISTRIBUTION_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/random/internal/fastmath.h" +#include "absl/random/internal/generate_real.h" +#include "absl/random/internal/iostream_state_saver.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::beta_distribution: + // Generate a floating-point variate conforming to a Beta distribution: + // pdf(x) \propto x^(alpha-1) * (1-x)^(beta-1), + // where the params alpha and beta are both strictly positive real values. + // + // The support is the open interval (0, 1), but the return value might be equal + // to 0 or 1, due to numerical errors when alpha and beta are very different. + // + // Usage note: One usage is that alpha and beta are counts of number of + // successes and failures. When the total number of trials are large, consider + // approximating a beta distribution with a Gaussian distribution with the same + // mean and variance. One could use the skewness, which depends only on the + // smaller of alpha and beta when the number of trials are sufficiently large, + // to quantify how far a beta distribution is from the normal distribution. + template + class beta_distribution + { + public: + using result_type = RealType; + + class param_type + { + public: + using distribution_type = beta_distribution; + + explicit param_type(result_type alpha, result_type beta) : + alpha_(alpha), + beta_(beta) + { + assert(alpha >= 0); + assert(beta >= 0); + assert(alpha <= (std::numeric_limits::max)()); + assert(beta <= (std::numeric_limits::max)()); + if (alpha == 0 || beta == 0) + { + method_ = DEGENERATE_SMALL; + x_ = (alpha >= beta) ? 1 : 0; + return; + } + // a_ = min(beta, alpha), b_ = max(beta, alpha). 
+ if (beta < alpha) + { + inverted_ = true; + a_ = beta; + b_ = alpha; + } + else + { + inverted_ = false; + a_ = alpha; + b_ = beta; + } + if (a_ <= 1 && b_ >= ThresholdForLargeA()) + { + method_ = DEGENERATE_SMALL; + x_ = inverted_ ? result_type(1) : result_type(0); + return; + } + // For threshold values, see also: + // Evaluation of Beta Generation Algorithms, Ying-Chao Hung, et. al. + // February, 2009. + if ((b_ < 1.0 && a_ + b_ <= 1.2) || a_ <= ThresholdForSmallA()) + { + // Choose Joehnk over Cheng when it's faster or when Cheng encounters + // numerical issues. + method_ = JOEHNK; + a_ = result_type(1) / alpha_; + b_ = result_type(1) / beta_; + if (std::isinf(a_) || std::isinf(b_)) + { + method_ = DEGENERATE_SMALL; + x_ = inverted_ ? result_type(1) : result_type(0); + } + return; + } + if (a_ >= ThresholdForLargeA()) + { + method_ = DEGENERATE_LARGE; + // Note: on PPC for long double, evaluating + // `std::numeric_limits::max() / ThresholdForLargeA` results in NaN. + result_type r = a_ / b_; + x_ = (inverted_ ? result_type(1) : r) / (1 + r); + return; + } + x_ = a_ + b_; + log_x_ = std::log(x_); + if (a_ <= 1) + { + method_ = CHENG_BA; + y_ = result_type(1) / a_; + gamma_ = a_ + a_; + return; + } + method_ = CHENG_BB; + result_type r = (a_ - 1) / (b_ - 1); + y_ = std::sqrt((1 + r) / (b_ * r * 2 - r + 1)); + gamma_ = a_ + result_type(1) / y_; + } + + result_type alpha() const + { + return alpha_; + } + result_type beta() const + { + return beta_; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.alpha_ == b.alpha_ && a.beta_ == b.beta_; + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class beta_distribution; + +#ifdef _MSC_VER + // MSVC does not have constexpr implementations for std::log and std::exp + // so they are computed at runtime. 
+#define ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR +#else +#define ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR constexpr +#endif + + // The threshold for whether std::exp(1/a) is finite. + // Note that this value is quite large, and a smaller a_ is NOT abnormal. + static ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR result_type + ThresholdForSmallA() + { + return result_type(1) / + std::log((std::numeric_limits::max)()); + } + + // The threshold for whether a * std::log(a) is finite. + static ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR result_type + ThresholdForLargeA() + { + return std::exp( + std::log((std::numeric_limits::max)()) - + std::log(std::log((std::numeric_limits::max)())) - + ThresholdPadding() + ); + } + +#undef ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR + + // Pad the threshold for large A for long double on PPC. This is done via a + // template specialization below. + static constexpr result_type ThresholdPadding() + { + return 0; + } + + enum Method + { + JOEHNK, // Uses algorithm Joehnk + CHENG_BA, // Uses algorithm BA in Cheng + CHENG_BB, // Uses algorithm BB in Cheng + + // Note: See also: + // Hung et al. Evaluation of beta generation algorithms. Communications + // in Statistics-Simulation and Computation 38.4 (2009): 750-770. + // especially: + // Zechner, Heinz, and Ernst Stadlober. Generating beta variates via + // patchwork rejection. Computing 50.1 (1993): 1-18. + + DEGENERATE_SMALL, // a_ is abnormally small. + DEGENERATE_LARGE, // a_ is abnormally large. + }; + + result_type alpha_; + result_type beta_; + + result_type a_; // the smaller of {alpha, beta}, or 1.0/alpha_ in JOEHNK + result_type b_; // the larger of {alpha, beta}, or 1.0/beta_ in JOEHNK + result_type x_; // alpha + beta, or the result in degenerate cases + result_type log_x_; // log(x_) + result_type y_; // "beta" in Cheng + result_type gamma_; // "gamma" in Cheng + + Method method_; + + // Placing this last for optimal alignment. + // Whether alpha_ != a_, i.e. true iff alpha_ > beta_. 
+ bool inverted_; + + static_assert(std::is_floating_point::value, "Class-template absl::beta_distribution<> must be " + "parameterized using a floating-point type."); + }; + + beta_distribution() : + beta_distribution(1) + { + } + + explicit beta_distribution(result_type alpha, result_type beta = 1) : + param_(alpha, beta) + { + } + + explicit beta_distribution(const param_type& p) : + param_(p) + { + } + + void reset() + { + } + + // Generating functions + template + result_type operator()(URBG& g) + { // NOLINT(runtime/references) + return (*this)(g, param_); + } + + template + result_type operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + param_type param() const + { + return param_; + } + void param(const param_type& p) + { + param_ = p; + } + + result_type(min)() const + { + return 0; + } + result_type(max)() const + { + return 1; + } + + result_type alpha() const + { + return param_.alpha(); + } + result_type beta() const + { + return param_.beta(); + } + + friend bool operator==(const beta_distribution& a, const beta_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const beta_distribution& a, const beta_distribution& b) + { + return a.param_ != b.param_; + } + + private: + template + result_type AlgorithmJoehnk(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + template + result_type AlgorithmCheng(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + template + result_type DegenerateCase(URBG& g, // NOLINT(runtime/references) + const param_type& p) + { + if (p.method_ == param_type::DEGENERATE_SMALL && p.alpha_ == p.beta_) + { + // Returns 0 or 1 with equal probability. + random_internal::FastUniformBits fast_u8; + return static_cast((fast_u8(g) & 0x10) != 0); // pick any single bit. 
+ } + return p.x_; + } + + param_type param_; + random_internal::FastUniformBits fast_u64_; + }; + +#if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \ + defined(__ppc__) || defined(__PPC__) + // PPC needs a more stringent boundary for long double. + template<> + constexpr long double + beta_distribution::param_type::ThresholdPadding() + { + return 10; + } +#endif + + template + template + typename beta_distribution::result_type + beta_distribution::AlgorithmJoehnk( + URBG& g, // NOLINT(runtime/references) + const param_type& p + ) + { + using random_internal::GeneratePositiveTag; + using random_internal::GenerateRealFromBits; + using real_type = + absl::conditional_t::value, float, double>; + + // Based on Joehnk, M. D. Erzeugung von betaverteilten und gammaverteilten + // Zufallszahlen. Metrika 8.1 (1964): 5-15. + // This method is described in Knuth, Vol 2 (Third Edition), pp 134. + + result_type u, v, x, y, z; + for (;;) + { + u = GenerateRealFromBits( + fast_u64_(g) + ); + v = GenerateRealFromBits( + fast_u64_(g) + ); + + // Direct method. std::pow is slow for float, so rely on the optimizer to + // remove the std::pow() path for that case. + if (!std::is_same::value) + { + x = std::pow(u, p.a_); + y = std::pow(v, p.b_); + z = x + y; + if (z > 1) + { + // Reject if and only if `x + y > 1.0` + continue; + } + if (z > 0) + { + // When both alpha and beta are small, x and y are both close to 0, so + // divide by (x+y) directly may result in nan. + return x / z; + } + } + + // Log transform. + // x = log( pow(u, p.a_) ), y = log( pow(v, p.b_) ) + // since u, v <= 1.0, x, y < 0. + x = std::log(u) * p.a_; + y = std::log(v) * p.b_; + if (!std::isfinite(x) || !std::isfinite(y)) + { + continue; + } + // z = log( pow(u, a) + pow(v, b) ) + z = x > y ? (x + std::log(1 + std::exp(y - x))) : (y + std::log(1 + std::exp(x - y))); + // Reject iff log(x+y) > 0. 
+ if (z > 0) + { + continue; + } + return std::exp(x - z); + } + } + + template + template + typename beta_distribution::result_type + beta_distribution::AlgorithmCheng( + URBG& g, // NOLINT(runtime/references) + const param_type& p + ) + { + using random_internal::GeneratePositiveTag; + using random_internal::GenerateRealFromBits; + using real_type = + absl::conditional_t::value, float, double>; + + // Based on Cheng, Russell CH. Generating beta variates with nonintegral + // shape parameters. Communications of the ACM 21.4 (1978): 317-322. + // (https://dl.acm.org/citation.cfm?id=359482). + static constexpr result_type kLogFour = + result_type(1.3862943611198906188344642429163531361); // log(4) + static constexpr result_type kS = + result_type(2.6094379124341003746007593332261876); // 1+log(5) + + const bool use_algorithm_ba = (p.method_ == param_type::CHENG_BA); + result_type u1, u2, v, w, z, r, s, t, bw_inv, lhs; + for (;;) + { + u1 = GenerateRealFromBits( + fast_u64_(g) + ); + u2 = GenerateRealFromBits( + fast_u64_(g) + ); + v = p.y_ * std::log(u1 / (1 - u1)); + w = p.a_ * std::exp(v); + bw_inv = result_type(1) / (p.b_ + w); + r = p.gamma_ * v - kLogFour; + s = p.a_ + r - w; + z = u1 * u1 * u2; + if (!use_algorithm_ba && s + kS >= 5 * z) + { + break; + } + t = std::log(z); + if (!use_algorithm_ba && s >= t) + { + break; + } + lhs = p.x_ * (p.log_x_ + std::log(bw_inv)) + r; + if (lhs >= t) + { + break; + } + } + return p.inverted_ ? 
(1 - w * bw_inv) : w * bw_inv; + } + + template + template + typename beta_distribution::result_type + beta_distribution::operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p) + { + switch (p.method_) + { + case param_type::JOEHNK: + return AlgorithmJoehnk(g, p); + case param_type::CHENG_BA: + ABSL_FALLTHROUGH_INTENDED; + case param_type::CHENG_BB: + return AlgorithmCheng(g, p); + default: + return DegenerateCase(g, p); + } + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const beta_distribution& x + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + os.precision(random_internal::stream_precision_helper::kPrecision); + os << x.alpha() << os.fill() << x.beta(); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + beta_distribution& x + ) + { // NOLINT(runtime/references) + using result_type = typename beta_distribution::result_type; + using param_type = typename beta_distribution::param_type; + result_type alpha, beta; + + auto saver = random_internal::make_istream_state_saver(is); + alpha = random_internal::read_floating_point(is); + if (is.fail()) + return is; + beta = random_internal::read_floating_point(is); + if (!is.fail()) + { + x.param(param_type(alpha, beta)); + } + return is; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_BETA_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/bit_gen_ref.h b/CAPI/cpp/grpc/include/absl/random/bit_gen_ref.h new file mode 100644 index 00000000..263669a1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/bit_gen_ref.h @@ -0,0 +1,200 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: bit_gen_ref.h +// ----------------------------------------------------------------------------- +// +// This header defines a bit generator "reference" class, for use in interfaces +// that take both Abseil (e.g. `absl::BitGen`) and standard library (e.g. +// `std::mt19937`) bit generators. + +#ifndef ABSL_RANDOM_BIT_GEN_REF_H_ +#define ABSL_RANDOM_BIT_GEN_REF_H_ + +#include +#include +#include + +#include "absl/base/internal/fast_type_id.h" +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" +#include "absl/random/internal/distribution_caller.h" +#include "absl/random/internal/fast_uniform_bits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + template + struct is_urbg : std::false_type + { + }; + + template + struct is_urbg< + URBG, + absl::enable_if_t::type>::value>, + absl::enable_if_t::type>::value>, + absl::enable_if_t()())>::type>::value>> : std::true_type + { + }; + + template + struct DistributionCaller; + class MockHelpers; + + } // namespace random_internal + + // ----------------------------------------------------------------------------- + // absl::BitGenRef + // ----------------------------------------------------------------------------- + // + // `absl::BitGenRef` is a type-erasing class that provides a generator-agnostic + // non-owning "reference" interface for use in place of any specific uniform + // random bit generator (URBG). This class may be used for both Abseil + // (e.g. 
`absl::BitGen`, `absl::InsecureBitGen`) and Standard library (e.g + // `std::mt19937`, `std::minstd_rand`) bit generators. + // + // Like other reference classes, `absl::BitGenRef` does not own the + // underlying bit generator, and the underlying instance must outlive the + // `absl::BitGenRef`. + // + // `absl::BitGenRef` is particularly useful when used with an + // `absl::MockingBitGen` to test specific paths in functions which use random + // values. + // + // Example: + // void TakesBitGenRef(absl::BitGenRef gen) { + // int x = absl::Uniform(gen, 0, 1000); + // } + // + class BitGenRef + { + // SFINAE to detect whether the URBG type includes a member matching + // bool InvokeMock(base_internal::FastTypeIdType, void*, void*). + // + // These live inside BitGenRef so that they have friend access + // to MockingBitGen. (see similar methods in DistributionCaller). + template class Trait, class AlwaysVoid, class... Args> + struct detector : std::false_type + { + }; + template class Trait, class... 
Args> + struct detector>, Args...> : std::true_type + { + }; + + template + using invoke_mock_t = decltype(std::declval()->InvokeMock( + std::declval(), std::declval(), std::declval() + )); + + template + using HasInvokeMock = typename detector::type; + + public: + BitGenRef(const BitGenRef&) = default; + BitGenRef(BitGenRef&&) = default; + BitGenRef& operator=(const BitGenRef&) = default; + BitGenRef& operator=(BitGenRef&&) = default; + + template::value && random_internal::is_urbg::value && !HasInvokeMock::value)>* = nullptr> + BitGenRef(URBG& gen) // NOLINT + : + t_erased_gen_ptr_(reinterpret_cast(&gen)), + mock_call_(NotAMock), + generate_impl_fn_(ImplFn) + { + } + + template::value && random_internal::is_urbg::value && HasInvokeMock::value)>* = nullptr> + BitGenRef(URBG& gen) // NOLINT + : + t_erased_gen_ptr_(reinterpret_cast(&gen)), + mock_call_(&MockCall), + generate_impl_fn_(ImplFn) + { + } + + using result_type = uint64_t; + + static constexpr result_type(min)() + { + return (std::numeric_limits::min)(); + } + + static constexpr result_type(max)() + { + return (std::numeric_limits::max)(); + } + + result_type operator()() + { + return generate_impl_fn_(t_erased_gen_ptr_); + } + + private: + using impl_fn = result_type (*)(uintptr_t); + using mock_call_fn = bool (*)(uintptr_t, base_internal::FastTypeIdType, void*, void*); + + template + static result_type ImplFn(uintptr_t ptr) + { + // Ensure that the return values from operator() fill the entire + // range promised by result_type, min() and max(). + absl::random_internal::FastUniformBits fast_uniform_bits; + return fast_uniform_bits(*reinterpret_cast(ptr)); + } + + // Get a type-erased InvokeMock pointer. 
+ template + static bool MockCall(uintptr_t gen_ptr, base_internal::FastTypeIdType type, void* result, void* arg_tuple) + { + return reinterpret_cast(gen_ptr)->InvokeMock(type, result, arg_tuple); + } + static bool NotAMock(uintptr_t, base_internal::FastTypeIdType, void*, void*) + { + return false; + } + + inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple, void* result) + { + if (mock_call_ == NotAMock) + return false; // avoids an indirect call. + return mock_call_(t_erased_gen_ptr_, type, args_tuple, result); + } + + uintptr_t t_erased_gen_ptr_; + mock_call_fn mock_call_; + impl_fn generate_impl_fn_; + + template + friend struct ::absl::random_internal::DistributionCaller; // for InvokeMock + friend class ::absl::random_internal::MockHelpers; // for InvokeMock + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_BIT_GEN_REF_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/discrete_distribution.h b/CAPI/cpp/grpc/include/absl/random/discrete_distribution.h new file mode 100644 index 00000000..d3dd7273 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/discrete_distribution.h @@ -0,0 +1,307 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_DISCRETE_DISTRIBUTION_H_ +#define ABSL_RANDOM_DISCRETE_DISTRIBUTION_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/random/bernoulli_distribution.h" +#include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/uniform_int_distribution.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::discrete_distribution + // + // A discrete distribution produces random integers i, where 0 <= i < n + // distributed according to the discrete probability function: + // + // P(i|p0,...,pn−1)=pi + // + // This class is an implementation of discrete_distribution (see + // [rand.dist.samp.discrete]). + // + // The algorithm used is Walker's Aliasing algorithm, described in Knuth, Vol 2. + // absl::discrete_distribution takes O(N) time to precompute the probabilities + // (where N is the number of possible outcomes in the distribution) at + // construction, and then takes O(1) time for each variate generation. Many + // other implementations also take O(N) time to construct an ordered sequence of + // partial sums, plus O(log N) time per variate to binary search. 
+ // + template + class discrete_distribution + { + public: + using result_type = IntType; + + class param_type + { + public: + using distribution_type = discrete_distribution; + + param_type() + { + init(); + } + + template + explicit param_type(InputIterator begin, InputIterator end) : + p_(begin, end) + { + init(); + } + + explicit param_type(std::initializer_list weights) : + p_(weights) + { + init(); + } + + template + explicit param_type(size_t nw, double xmin, double xmax, UnaryOperation fw) + { + if (nw > 0) + { + p_.reserve(nw); + double delta = (xmax - xmin) / static_cast(nw); + assert(delta > 0); + double t = delta * 0.5; + for (size_t i = 0; i < nw; ++i) + { + p_.push_back(fw(xmin + i * delta + t)); + } + } + init(); + } + + const std::vector& probabilities() const + { + return p_; + } + size_t n() const + { + return p_.size() - 1; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.probabilities() == b.probabilities(); + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class discrete_distribution; + + void init(); + + std::vector p_; // normalized probabilities + std::vector> q_; // (acceptance, alternate) pairs + + static_assert(std::is_integral::value, "Class-template absl::discrete_distribution<> must be " + "parameterized using an integral type."); + }; + + discrete_distribution() : + param_() + { + } + + explicit discrete_distribution(const param_type& p) : + param_(p) + { + } + + template + explicit discrete_distribution(InputIterator begin, InputIterator end) : + param_(begin, end) + { + } + + explicit discrete_distribution(std::initializer_list weights) : + param_(weights) + { + } + + template + explicit discrete_distribution(size_t nw, double xmin, double xmax, UnaryOperation fw) : + param_(nw, xmin, xmax, std::move(fw)) + { + } + + void reset() + { + } + + // generating functions + template + result_type operator()(URBG& g) + { // 
NOLINT(runtime/references) + return (*this)(g, param_); + } + + template + result_type operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + const param_type& param() const + { + return param_; + } + void param(const param_type& p) + { + param_ = p; + } + + result_type(min)() const + { + return 0; + } + result_type(max)() const + { + return static_cast(param_.n()); + } // inclusive + + // NOTE [rand.dist.sample.discrete] returns a std::vector not a + // const std::vector&. + const std::vector& probabilities() const + { + return param_.probabilities(); + } + + friend bool operator==(const discrete_distribution& a, const discrete_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const discrete_distribution& a, const discrete_distribution& b) + { + return a.param_ != b.param_; + } + + private: + param_type param_; + }; + + // -------------------------------------------------------------------------- + // Implementation details only below + // -------------------------------------------------------------------------- + + namespace random_internal + { + + // Using the vector `*probabilities`, whose values are the weights or + // probabilities of an element being selected, constructs the proportional + // probabilities used by the discrete distribution. `*probabilities` will be + // scaled, if necessary, so that its entries sum to a value sufficiently close + // to 1.0. 
+ std::vector> InitDiscreteDistribution( + std::vector* probabilities + ); + + } // namespace random_internal + + template + void discrete_distribution::param_type::init() + { + if (p_.empty()) + { + p_.push_back(1.0); + q_.emplace_back(1.0, 0); + } + else + { + assert(n() <= (std::numeric_limits::max)()); + q_ = random_internal::InitDiscreteDistribution(&p_); + } + } + + template + template + typename discrete_distribution::result_type + discrete_distribution::operator()( + URBG& g, // NOLINT(runtime/references) + const param_type& p + ) + { + const auto idx = absl::uniform_int_distribution(0, p.n())(g); + const auto& q = p.q_[idx]; + const bool selected = absl::bernoulli_distribution(q.first)(g); + return selected ? idx : static_cast(q.second); + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const discrete_distribution& x + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + const auto& probabilities = x.param().probabilities(); + os << probabilities.size(); + + os.precision(random_internal::stream_precision_helper::kPrecision); + for (const auto& p : probabilities) + { + os << os.fill() << p; + } + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + discrete_distribution& x + ) + { // NOLINT(runtime/references) + using param_type = typename discrete_distribution::param_type; + auto saver = random_internal::make_istream_state_saver(is); + + size_t n; + std::vector p; + + is >> n; + if (is.fail()) + return is; + if (n > 0) + { + p.reserve(n); + for (IntType i = 0; i < n && !is.fail(); ++i) + { + auto tmp = random_internal::read_floating_point(is); + if (is.fail()) + return is; + p.push_back(tmp); + } + } + x.param(param_type(p.begin(), p.end())); + return is; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_DISCRETE_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/distributions.h 
b/CAPI/cpp/grpc/include/absl/random/distributions.h new file mode 100644 index 00000000..44161dbb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/distributions.h @@ -0,0 +1,472 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: distributions.h +// ----------------------------------------------------------------------------- +// +// This header defines functions representing distributions, which you use in +// combination with an Abseil random bit generator to produce random values +// according to the rules of that distribution. 
+// +// The Abseil random library defines the following distributions within this +// file: +// +// * `absl::Uniform` for uniform (constant) distributions having constant +// probability +// * `absl::Bernoulli` for discrete distributions having exactly two outcomes +// * `absl::Beta` for continuous distributions parameterized through two +// free parameters +// * `absl::Exponential` for discrete distributions of events occurring +// continuously and independently at a constant average rate +// * `absl::Gaussian` (also known as "normal distributions") for continuous +// distributions using an associated quadratic function +// * `absl::LogUniform` for continuous uniform distributions where the log +// to the given base of all values is uniform +// * `absl::Poisson` for discrete probability distributions that express the +// probability of a given number of events occurring within a fixed interval +// * `absl::Zipf` for discrete probability distributions commonly used for +// modelling of rare events +// +// Prefer use of these distribution function classes over manual construction of +// your own distribution classes, as it allows library maintainers greater +// flexibility to change the underlying implementation in the future. 
+ +#ifndef ABSL_RANDOM_DISTRIBUTIONS_H_ +#define ABSL_RANDOM_DISTRIBUTIONS_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/internal/inline_variable.h" +#include "absl/random/bernoulli_distribution.h" +#include "absl/random/beta_distribution.h" +#include "absl/random/exponential_distribution.h" +#include "absl/random/gaussian_distribution.h" +#include "absl/random/internal/distribution_caller.h" // IWYU pragma: export +#include "absl/random/internal/uniform_helper.h" // IWYU pragma: export +#include "absl/random/log_uniform_int_distribution.h" +#include "absl/random/poisson_distribution.h" +#include "absl/random/uniform_int_distribution.h" +#include "absl/random/uniform_real_distribution.h" +#include "absl/random/zipf_distribution.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosedClosed, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosed, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedOpenTag, IntervalClosedOpen, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpenOpen, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpen, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenClosedTag, IntervalOpenClosed, {}); + + // ----------------------------------------------------------------------------- + // absl::Uniform(tag, bitgen, lo, hi) + // ----------------------------------------------------------------------------- + // + // `absl::Uniform()` produces random values of type `T` uniformly distributed in + // a defined interval {lo, hi}. 
The interval `tag` defines the type of interval + // which should be one of the following possible values: + // + // * `absl::IntervalOpenOpen` + // * `absl::IntervalOpenClosed` + // * `absl::IntervalClosedOpen` + // * `absl::IntervalClosedClosed` + // + // where "open" refers to an exclusive value (excluded) from the output, while + // "closed" refers to an inclusive value (included) from the output. + // + // In the absence of an explicit return type `T`, `absl::Uniform()` will deduce + // the return type based on the provided endpoint arguments {A lo, B hi}. + // Given these endpoints, one of {A, B} will be chosen as the return type, if + // a type can be implicitly converted into the other in a lossless way. The + // lack of any such implicit conversion between {A, B} will produce a + // compile-time error + // + // See https://en.wikipedia.org/wiki/Uniform_distribution_(continuous) + // + // Example: + // + // absl::BitGen bitgen; + // + // // Produce a random float value between 0.0 and 1.0, inclusive + // auto x = absl::Uniform(absl::IntervalClosedClosed, bitgen, 0.0f, 1.0f); + // + // // The most common interval of `absl::IntervalClosedOpen` is available by + // // default: + // + // auto x = absl::Uniform(bitgen, 0.0f, 1.0f); + // + // // Return-types are typically inferred from the arguments, however callers + // // can optionally provide an explicit return-type to the template. 
+ // + // auto x = absl::Uniform(bitgen, 0, 1); + // + template + typename absl::enable_if_t::value, R> // + Uniform(TagType tag, + URBG&& urbg, // NOLINT(runtime/references) + R lo, + R hi) + { + using gen_t = absl::decay_t; + using distribution_t = random_internal::UniformDistributionWrapper; + + auto a = random_internal::uniform_lower_bound(tag, lo, hi); + auto b = random_internal::uniform_upper_bound(tag, lo, hi); + if (!random_internal::is_uniform_range_valid(a, b)) + return lo; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, tag, lo, hi); + } + + // absl::Uniform(bitgen, lo, hi) + // + // Overload of `Uniform()` using the default closed-open interval of [lo, hi), + // and returning values of type `T` + template + typename absl::enable_if_t::value, R> // + Uniform(URBG&& urbg, // NOLINT(runtime/references) + R lo, + R hi) + { + using gen_t = absl::decay_t; + using distribution_t = random_internal::UniformDistributionWrapper; + constexpr auto tag = absl::IntervalClosedOpen; + + auto a = random_internal::uniform_lower_bound(tag, lo, hi); + auto b = random_internal::uniform_upper_bound(tag, lo, hi); + if (!random_internal::is_uniform_range_valid(a, b)) + return lo; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, lo, hi); + } + + // absl::Uniform(tag, bitgen, lo, hi) + // + // Overload of `Uniform()` using different (but compatible) lo, hi types. Note + // that a compile-error will result if the return type cannot be deduced + // correctly from the passed types. 
+ template + typename absl::enable_if_t::value, random_internal::uniform_inferred_return_t> + Uniform(TagType tag, + URBG&& urbg, // NOLINT(runtime/references) + A lo, + B hi) + { + using gen_t = absl::decay_t; + using return_t = typename random_internal::uniform_inferred_return_t; + using distribution_t = random_internal::UniformDistributionWrapper; + + auto a = random_internal::uniform_lower_bound(tag, lo, hi); + auto b = random_internal::uniform_upper_bound(tag, lo, hi); + if (!random_internal::is_uniform_range_valid(a, b)) + return lo; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, tag, static_cast(lo), static_cast(hi)); + } + + // absl::Uniform(bitgen, lo, hi) + // + // Overload of `Uniform()` using different (but compatible) lo, hi types and the + // default closed-open interval of [lo, hi). Note that a compile-error will + // result if the return type cannot be deduced correctly from the passed types. + template + typename absl::enable_if_t::value, random_internal::uniform_inferred_return_t> + Uniform(URBG&& urbg, // NOLINT(runtime/references) + A lo, + B hi) + { + using gen_t = absl::decay_t; + using return_t = typename random_internal::uniform_inferred_return_t; + using distribution_t = random_internal::UniformDistributionWrapper; + + constexpr auto tag = absl::IntervalClosedOpen; + auto a = random_internal::uniform_lower_bound(tag, lo, hi); + auto b = random_internal::uniform_upper_bound(tag, lo, hi); + if (!random_internal::is_uniform_range_valid(a, b)) + return lo; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, static_cast(lo), static_cast(hi)); + } + + // absl::Uniform(bitgen) + // + // Overload of Uniform() using the minimum and maximum values of a given type + // `T` (which must be unsigned), returning a value of type `unsigned T` + template + typename absl::enable_if_t::value, R> // + Uniform(URBG&& urbg) + { // NOLINT(runtime/references) + using gen_t = absl::decay_t; 
+ using distribution_t = random_internal::UniformDistributionWrapper; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg); + } + + // ----------------------------------------------------------------------------- + // absl::Bernoulli(bitgen, p) + // ----------------------------------------------------------------------------- + // + // `absl::Bernoulli` produces a random boolean value, with probability `p` + // (where 0.0 <= p <= 1.0) equaling `true`. + // + // Prefer `absl::Bernoulli` to produce boolean values over other alternatives + // such as comparing an `absl::Uniform()` value to a specific output. + // + // See https://en.wikipedia.org/wiki/Bernoulli_distribution + // + // Example: + // + // absl::BitGen bitgen; + // ... + // if (absl::Bernoulli(bitgen, 1.0/3721.0)) { + // std::cout << "Asteroid field navigation successful."; + // } + // + template + bool Bernoulli(URBG&& urbg, // NOLINT(runtime/references) + double p) + { + using gen_t = absl::decay_t; + using distribution_t = absl::bernoulli_distribution; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, p); + } + + // ----------------------------------------------------------------------------- + // absl::Beta(bitgen, alpha, beta) + // ----------------------------------------------------------------------------- + // + // `absl::Beta` produces a floating point number distributed in the closed + // interval [0,1] and parameterized by two values `alpha` and `beta` as per a + // Beta distribution. `T` must be a floating point type, but may be inferred + // from the types of `alpha` and `beta`. + // + // See https://en.wikipedia.org/wiki/Beta_distribution. + // + // Example: + // + // absl::BitGen bitgen; + // ... 
+ // double sample = absl::Beta(bitgen, 3.0, 2.0); + // + template + RealType Beta(URBG&& urbg, // NOLINT(runtime/references) + RealType alpha, + RealType beta) + { + static_assert( + std::is_floating_point::value, + "Template-argument 'RealType' must be a floating-point type, in " + "absl::Beta(...)" + ); + + using gen_t = absl::decay_t; + using distribution_t = typename absl::beta_distribution; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, alpha, beta); + } + + // ----------------------------------------------------------------------------- + // absl::Exponential(bitgen, lambda = 1) + // ----------------------------------------------------------------------------- + // + // `absl::Exponential` produces a floating point number representing the + // distance (time) between two consecutive events in a point process of events + // occurring continuously and independently at a constant average rate. `T` must + // be a floating point type, but may be inferred from the type of `lambda`. + // + // See https://en.wikipedia.org/wiki/Exponential_distribution. + // + // Example: + // + // absl::BitGen bitgen; + // ... + // double call_length = absl::Exponential(bitgen, 7.0); + // + template + RealType Exponential(URBG&& urbg, // NOLINT(runtime/references) + RealType lambda = 1) + { + static_assert( + std::is_floating_point::value, + "Template-argument 'RealType' must be a floating-point type, in " + "absl::Exponential(...)" + ); + + using gen_t = absl::decay_t; + using distribution_t = typename absl::exponential_distribution; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, lambda); + } + + // ----------------------------------------------------------------------------- + // absl::Gaussian(bitgen, mean = 0, stddev = 1) + // ----------------------------------------------------------------------------- + // + // `absl::Gaussian` produces a floating point number selected from the Gaussian + // (ie. 
"Normal") distribution. `T` must be a floating point type, but may be + // inferred from the types of `mean` and `stddev`. + // + // See https://en.wikipedia.org/wiki/Normal_distribution + // + // Example: + // + // absl::BitGen bitgen; + // ... + // double giraffe_height = absl::Gaussian(bitgen, 16.3, 3.3); + // + template + RealType Gaussian(URBG&& urbg, // NOLINT(runtime/references) + RealType mean = 0, + RealType stddev = 1) + { + static_assert( + std::is_floating_point::value, + "Template-argument 'RealType' must be a floating-point type, in " + "absl::Gaussian(...)" + ); + + using gen_t = absl::decay_t; + using distribution_t = typename absl::gaussian_distribution; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, mean, stddev); + } + + // ----------------------------------------------------------------------------- + // absl::LogUniform(bitgen, lo, hi, base = 2) + // ----------------------------------------------------------------------------- + // + // `absl::LogUniform` produces random values distributed where the log to a + // given base of all values is uniform in a closed interval [lo, hi]. `T` must + // be an integral type, but may be inferred from the types of `lo` and `hi`. + // + // I.e., `LogUniform(0, n, b)` is uniformly distributed across buckets + // [0], [1, b-1], [b, b^2-1] .. [b^(k-1), (b^k)-1] .. [b^floor(log(n, b)), n] + // and is uniformly distributed within each bucket. + // + // The resulting probability density is inversely related to bucket size, though + // values in the final bucket may be more likely than previous values. (In the + // extreme case where n = b^i the final value will be tied with zero as the most + // probable result. + // + // If `lo` is nonzero then this distribution is shifted to the desired interval, + // so LogUniform(lo, hi, b) is equivalent to LogUniform(0, hi-lo, b)+lo. 
+ // + // See http://ecolego.facilia.se/ecolego/show/Log-Uniform%20Distribution + // + // Example: + // + // absl::BitGen bitgen; + // ... + // int v = absl::LogUniform(bitgen, 0, 1000); + // + template + IntType LogUniform(URBG&& urbg, // NOLINT(runtime/references) + IntType lo, + IntType hi, + IntType base = 2) + { + static_assert(random_internal::IsIntegral::value, "Template-argument 'IntType' must be an integral type, in " + "absl::LogUniform(...)"); + + using gen_t = absl::decay_t; + using distribution_t = typename absl::log_uniform_int_distribution; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, lo, hi, base); + } + + // ----------------------------------------------------------------------------- + // absl::Poisson(bitgen, mean = 1) + // ----------------------------------------------------------------------------- + // + // `absl::Poisson` produces discrete probabilities for a given number of events + // occurring within a fixed interval within the closed interval [0, max]. `T` + // must be an integral type. + // + // See https://en.wikipedia.org/wiki/Poisson_distribution + // + // Example: + // + // absl::BitGen bitgen; + // ... 
+ // int requests_per_minute = absl::Poisson(bitgen, 3.2); + // + template + IntType Poisson(URBG&& urbg, // NOLINT(runtime/references) + double mean = 1.0) + { + static_assert(random_internal::IsIntegral::value, "Template-argument 'IntType' must be an integral type, in " + "absl::Poisson(...)"); + + using gen_t = absl::decay_t; + using distribution_t = typename absl::poisson_distribution; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, mean); + } + + // ----------------------------------------------------------------------------- + // absl::Zipf(bitgen, hi = max, q = 2, v = 1) + // ----------------------------------------------------------------------------- + // + // `absl::Zipf` produces discrete probabilities commonly used for modelling of + // rare events over the closed interval [0, hi]. The parameters `v` and `q` + // determine the skew of the distribution. `T` must be an integral type, but + // may be inferred from the type of `hi`. + // + // See http://mathworld.wolfram.com/ZipfDistribution.html + // + // Example: + // + // absl::BitGen bitgen; + // ... 
+ // int term_rank = absl::Zipf(bitgen); + // + template + IntType Zipf(URBG&& urbg, // NOLINT(runtime/references) + IntType hi = (std::numeric_limits::max)(), + double q = 2.0, + double v = 1.0) + { + static_assert(random_internal::IsIntegral::value, "Template-argument 'IntType' must be an integral type, in " + "absl::Zipf(...)"); + + using gen_t = absl::decay_t; + using distribution_t = typename absl::zipf_distribution; + + return random_internal::DistributionCaller::template Call< + distribution_t>(&urbg, hi, q, v); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_DISTRIBUTIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/exponential_distribution.h b/CAPI/cpp/grpc/include/absl/random/exponential_distribution.h new file mode 100644 index 00000000..4ae88fb4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/exponential_distribution.h @@ -0,0 +1,208 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_EXPONENTIAL_DISTRIBUTION_H_ +#define ABSL_RANDOM_EXPONENTIAL_DISTRIBUTION_H_ + +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/random/internal/generate_real.h" +#include "absl/random/internal/iostream_state_saver.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::exponential_distribution: + // Generates a number conforming to an exponential distribution and is + // equivalent to the standard [rand.dist.pois.exp] distribution. + template + class exponential_distribution + { + public: + using result_type = RealType; + + class param_type + { + public: + using distribution_type = exponential_distribution; + + explicit param_type(result_type lambda = 1) : + lambda_(lambda) + { + assert(lambda > 0); + neg_inv_lambda_ = -result_type(1) / lambda_; + } + + result_type lambda() const + { + return lambda_; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.lambda_ == b.lambda_; + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class exponential_distribution; + + result_type lambda_; + result_type neg_inv_lambda_; + + static_assert( + std::is_floating_point::value, + "Class-template absl::exponential_distribution<> must be parameterized " + "using a floating-point type." 
+ ); + }; + + exponential_distribution() : + exponential_distribution(1) + { + } + + explicit exponential_distribution(result_type lambda) : + param_(lambda) + { + } + + explicit exponential_distribution(const param_type& p) : + param_(p) + { + } + + void reset() + { + } + + // Generating functions + template + result_type operator()(URBG& g) + { // NOLINT(runtime/references) + return (*this)(g, param_); + } + + template + result_type operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + param_type param() const + { + return param_; + } + void param(const param_type& p) + { + param_ = p; + } + + result_type(min)() const + { + return 0; + } + result_type(max)() const + { + return std::numeric_limits::infinity(); + } + + result_type lambda() const + { + return param_.lambda(); + } + + friend bool operator==(const exponential_distribution& a, const exponential_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const exponential_distribution& a, const exponential_distribution& b) + { + return a.param_ != b.param_; + } + + private: + param_type param_; + random_internal::FastUniformBits fast_u64_; + }; + + // -------------------------------------------------------------------------- + // Implementation details follow + // -------------------------------------------------------------------------- + + template + template + typename exponential_distribution::result_type + exponential_distribution::operator()( + URBG& g, // NOLINT(runtime/references) + const param_type& p + ) + { + using random_internal::GenerateNegativeTag; + using random_internal::GenerateRealFromBits; + using real_type = + absl::conditional_t::value, float, double>; + + const result_type u = GenerateRealFromBits(fast_u64_(g)); // U(-1, 0) + + // log1p(-x) is mathematically equivalent to log(1 - x) but has more + // accuracy for x near zero. 
+ return p.neg_inv_lambda_ * std::log1p(u); + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const exponential_distribution& x + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + os.precision(random_internal::stream_precision_helper::kPrecision); + os << x.lambda(); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + exponential_distribution& x + ) + { // NOLINT(runtime/references) + using result_type = typename exponential_distribution::result_type; + using param_type = typename exponential_distribution::param_type; + result_type lambda; + + auto saver = random_internal::make_istream_state_saver(is); + lambda = random_internal::read_floating_point(is); + if (!is.fail()) + { + x.param(param_type(lambda)); + } + return is; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_EXPONENTIAL_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/gaussian_distribution.h b/CAPI/cpp/grpc/include/absl/random/gaussian_distribution.h new file mode 100644 index 00000000..43aecc17 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/gaussian_distribution.h @@ -0,0 +1,339 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_GAUSSIAN_DISTRIBUTION_H_ +#define ABSL_RANDOM_GAUSSIAN_DISTRIBUTION_H_ + +// absl::gaussian_distribution implements the Ziggurat algorithm +// for generating random gaussian numbers. +// +// Implementation based on "The Ziggurat Method for Generating Random Variables" +// by George Marsaglia and Wai Wan Tsang: http://www.jstatsoft.org/v05/i08/ +// + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/random/internal/generate_real.h" +#include "absl/random/internal/iostream_state_saver.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // absl::gaussian_distribution_base implements the underlying ziggurat algorithm + // using the ziggurat tables generated by the gaussian_distribution_gentables + // binary. + // + // The specific algorithm has some of the improvements suggested by the + // 2005 paper, "An Improved Ziggurat Method to Generate Normal Random Samples", + // Jurgen A Doornik. (https://www.doornik.com/research/ziggurat.pdf) + class ABSL_DLL gaussian_distribution_base + { + public: + template + inline double zignor(URBG& g); // NOLINT(runtime/references) + + private: + friend class TableGenerator; + + template + inline double zignor_fallback(URBG& g, // NOLINT(runtime/references) + bool neg); + + // Constants used for the gaussian distribution. + static constexpr double kR = 3.442619855899; // Start of the tail. + static constexpr double kRInv = 0.29047645161474317; // ~= (1.0 / kR) . + static constexpr double kV = 9.91256303526217e-3; + static constexpr uint64_t kMask = 0x07f; + + // The ziggurat tables store the pdf(f) and inverse-pdf(x) for equal-area + // points on one-half of the normal distribution, where the pdf function, + // pdf = e ^ (-1/2 *x^2), assumes that the mean = 0 & stddev = 1. 
+ // + // These tables are just over 2kb in size; larger tables might improve the + // distributions, but also lead to more cache pollution. + // + // x = {3.71308, 3.44261, 3.22308, ..., 0} + // f = {0.00101, 0.00266, 0.00554, ..., 1} + struct Tables + { + double x[kMask + 2]; + double f[kMask + 2]; + }; + static const Tables zg_; + random_internal::FastUniformBits fast_u64_; + }; + + } // namespace random_internal + + // absl::gaussian_distribution: + // Generates a number conforming to a Gaussian distribution. + template + class gaussian_distribution : random_internal::gaussian_distribution_base + { + public: + using result_type = RealType; + + class param_type + { + public: + using distribution_type = gaussian_distribution; + + explicit param_type(result_type mean = 0, result_type stddev = 1) : + mean_(mean), + stddev_(stddev) + { + } + + // Returns the mean distribution parameter. The mean specifies the location + // of the peak. The default value is 0.0. + result_type mean() const + { + return mean_; + } + + // Returns the deviation distribution parameter. The default value is 1.0. + result_type stddev() const + { + return stddev_; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.mean_ == b.mean_ && a.stddev_ == b.stddev_; + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + result_type mean_; + result_type stddev_; + + static_assert( + std::is_floating_point::value, + "Class-template absl::gaussian_distribution<> must be parameterized " + "using a floating-point type." 
+ ); + }; + + gaussian_distribution() : + gaussian_distribution(0) + { + } + + explicit gaussian_distribution(result_type mean, result_type stddev = 1) : + param_(mean, stddev) + { + } + + explicit gaussian_distribution(const param_type& p) : + param_(p) + { + } + + void reset() + { + } + + // Generating functions + template + result_type operator()(URBG& g) + { // NOLINT(runtime/references) + return (*this)(g, param_); + } + + template + result_type operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + param_type param() const + { + return param_; + } + void param(const param_type& p) + { + param_ = p; + } + + result_type(min)() const + { + return -std::numeric_limits::infinity(); + } + result_type(max)() const + { + return std::numeric_limits::infinity(); + } + + result_type mean() const + { + return param_.mean(); + } + result_type stddev() const + { + return param_.stddev(); + } + + friend bool operator==(const gaussian_distribution& a, const gaussian_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const gaussian_distribution& a, const gaussian_distribution& b) + { + return a.param_ != b.param_; + } + + private: + param_type param_; + }; + + // -------------------------------------------------------------------------- + // Implementation details only below + // -------------------------------------------------------------------------- + + template + template + typename gaussian_distribution::result_type + gaussian_distribution::operator()( + URBG& g, // NOLINT(runtime/references) + const param_type& p + ) + { + return p.mean() + p.stddev() * static_cast(zignor(g)); + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const gaussian_distribution& x + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + os.precision(random_internal::stream_precision_helper::kPrecision); + os << x.mean() << os.fill() << x.stddev(); + return os; + } + + 
template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + gaussian_distribution& x + ) + { // NOLINT(runtime/references) + using result_type = typename gaussian_distribution::result_type; + using param_type = typename gaussian_distribution::param_type; + + auto saver = random_internal::make_istream_state_saver(is); + auto mean = random_internal::read_floating_point(is); + if (is.fail()) + return is; + auto stddev = random_internal::read_floating_point(is); + if (!is.fail()) + { + x.param(param_type(mean, stddev)); + } + return is; + } + + namespace random_internal + { + + template + inline double gaussian_distribution_base::zignor_fallback(URBG& g, bool neg) + { + using random_internal::GeneratePositiveTag; + using random_internal::GenerateRealFromBits; + + // This fallback path happens approximately 0.05% of the time. + double x, y; + do + { + // kRInv = 1/r, U(0, 1) + x = kRInv * + std::log(GenerateRealFromBits( + fast_u64_(g) + )); + y = -std::log( + GenerateRealFromBits(fast_u64_(g)) + ); + } while ((y + y) < (x * x)); + return neg ? (x - kR) : (kR - x); + } + + template + inline double gaussian_distribution_base::zignor( + URBG& g + ) + { // NOLINT(runtime/references) + using random_internal::GeneratePositiveTag; + using random_internal::GenerateRealFromBits; + using random_internal::GenerateSignedTag; + + while (true) + { + // We use a single uint64_t to generate both a double and a strip. + // These bits are unused when the generated double is > 1/2^5. + // This may introduce some bias from the duplicated low bits of small + // values (those smaller than 1/2^5, which all end up on the left tail). + uint64_t bits = fast_u64_(g); + int i = static_cast(bits & kMask); // pick a random strip + double j = GenerateRealFromBits( + bits + ); // U(-1, 1) + const double x = j * zg_.x[i]; + + // Retangular box. Handles >97% of all cases. + // For any given box, this handles between 75% and 99% of values. 
+ // Equivalent to U(01) < (x[i+1] / x[i]), and when i == 0, ~93.5% + if (std::abs(x) < zg_.x[i + 1]) + { + return x; + } + + // i == 0: Base box. Sample using a ratio of uniforms. + if (i == 0) + { + // This path happens about 0.05% of the time. + return zignor_fallback(g, j < 0); + } + + // i > 0: Wedge samples using precomputed values. + double v = GenerateRealFromBits( + fast_u64_(g) + ); // U(0, 1) + if ((zg_.f[i + 1] + v * (zg_.f[i] - zg_.f[i + 1])) < + std::exp(-0.5 * x * x)) + { + return x; + } + + // The wedge was missed; reject the value and try again. + } + } + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_GAUSSIAN_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/chi_square.h b/CAPI/cpp/grpc/include/absl/random/internal/chi_square.h new file mode 100644 index 00000000..3a581759 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/chi_square.h @@ -0,0 +1,97 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_CHI_SQUARE_H_ +#define ABSL_RANDOM_INTERNAL_CHI_SQUARE_H_ + +// The chi-square statistic. +// +// Useful for evaluating if `D` independent random variables are behaving as +// expected, or if two distributions are similar. (`D` is the degrees of +// freedom). +// +// Each bucket should have an expected count of 10 or more for the chi square to +// be meaningful. 
+ +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + constexpr const char kChiSquared[] = "chi-squared"; + + // Returns the measured chi square value, using a single expected value. This + // assumes that the values in [begin, end) are uniformly distributed. + template + double ChiSquareWithExpected(Iterator begin, Iterator end, double expected) + { + // Compute the sum and the number of buckets. + assert(expected >= 10); // require at least 10 samples per bucket. + double chi_square = 0; + for (auto it = begin; it != end; it++) + { + double d = static_cast(*it) - expected; + chi_square += d * d; + } + chi_square = chi_square / expected; + return chi_square; + } + + // Returns the measured chi square value, taking the actual value of each bucket + // from the first set of iterators, and the expected value of each bucket from + // the second set of iterators. + template + double ChiSquare(Iterator it, Iterator end, Expected eit, Expected eend) + { + double chi_square = 0; + for (; it != end && eit != eend; ++it, ++eit) + { + if (*it > 0) + { + assert(*eit > 0); + } + double e = static_cast(*eit); + double d = static_cast(*it - *eit); + if (d != 0) + { + assert(e > 0); + chi_square += (d * d) / e; + } + } + assert(it == end && eit == eend); + return chi_square; + } + + // ====================================================================== + // The following methods can be used for an arbitrary significance level. + // + + // Calculates critical chi-square values to produce the given p-value using a + // bisection search for a value within epsilon, relying on the monotonicity of + // ChiSquarePValue(). + double ChiSquareValue(int dof, double p); + + // Calculates the p-value (probability) of a given chi-square value. 
+ double ChiSquarePValue(double chi_square, int dof); + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_CHI_SQUARE_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/distribution_caller.h b/CAPI/cpp/grpc/include/absl/random/internal/distribution_caller.h new file mode 100644 index 00000000..c5ed5588 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/distribution_caller.h @@ -0,0 +1,102 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_RANDOM_INTERNAL_DISTRIBUTION_CALLER_H_ +#define ABSL_RANDOM_INTERNAL_DISTRIBUTION_CALLER_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/fast_type_id.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // DistributionCaller provides an opportunity to overload the general + // mechanism for calling a distribution, allowing for mock-RNG classes + // to intercept such calls. + template + struct DistributionCaller + { + static_assert(!std::is_pointer::value, "You must pass a reference, not a pointer."); + // SFINAE to detect whether the URBG type includes a member matching + // bool InvokeMock(base_internal::FastTypeIdType, void*, void*). + // + // These live inside BitGenRef so that they have friend access + // to MockingBitGen. (see similar methods in DistributionCaller). 
+ template class Trait, class AlwaysVoid, class... Args> + struct detector : std::false_type + { + }; + template class Trait, class... Args> + struct detector>, Args...> : std::true_type + { + }; + + template + using invoke_mock_t = decltype(std::declval()->InvokeMock( + std::declval<::absl::base_internal::FastTypeIdType>(), + std::declval(), + std::declval() + )); + + using HasInvokeMock = typename detector::type; + + // Default implementation of distribution caller. + template + static typename DistrT::result_type Impl(std::false_type, URBG* urbg, Args&&... args) + { + DistrT dist(std::forward(args)...); + return dist(*urbg); + } + + // Mock implementation of distribution caller. + // The underlying KeyT must match the KeyT constructed by MockOverloadSet. + template + static typename DistrT::result_type Impl(std::true_type, URBG* urbg, Args&&... args) + { + using ResultT = typename DistrT::result_type; + using ArgTupleT = std::tuple...>; + using KeyT = ResultT(DistrT, ArgTupleT); + + ArgTupleT arg_tuple(std::forward(args)...); + ResultT result; + if (!urbg->InvokeMock(::absl::base_internal::FastTypeId(), &arg_tuple, &result)) + { + auto dist = absl::make_from_tuple(arg_tuple); + result = dist(*urbg); + } + return result; + } + + // Default implementation of distribution caller. + template + static typename DistrT::result_type Call(URBG* urbg, Args&&... args) + { + return Impl(HasInvokeMock{}, urbg, std::forward(args)...); + } + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_DISTRIBUTION_CALLER_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/distribution_test_util.h b/CAPI/cpp/grpc/include/absl/random/internal/distribution_test_util.h new file mode 100644 index 00000000..619e324b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/distribution_test_util.h @@ -0,0 +1,117 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_DISTRIBUTION_TEST_UTIL_H_ +#define ABSL_RANDOM_INTERNAL_DISTRIBUTION_TEST_UTIL_H_ + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/span.h" + +// NOTE: The functions in this file are test only, and are should not be used in +// non-test code. + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // http://webspace.ship.edu/pgmarr/Geo441/Lectures/Lec%205%20-%20Normality%20Testing.pdf + + // Compute the 1st to 4th standard moments: + // mean, variance, skewness, and kurtosis. + // http://www.itl.nist.gov/div898/handbook/eda/section3/eda35b.htm + struct DistributionMoments + { + size_t n = 0; + double mean = 0.0; + double variance = 0.0; + double skewness = 0.0; + double kurtosis = 0.0; + }; + DistributionMoments ComputeDistributionMoments( + absl::Span data_points + ); + + std::ostream& operator<<(std::ostream& os, const DistributionMoments& moments); + + // Computes the Z-score for a set of data with the given distribution moments + // compared against `expected_mean`. + double ZScore(double expected_mean, const DistributionMoments& moments); + + // Returns the probability of success required for a single trial to ensure that + // after `num_trials` trials, the probability of at least one failure is no more + // than `p_fail`. 
+ double RequiredSuccessProbability(double p_fail, int num_trials); + + // Computes the maximum distance from the mean tolerable, for Z-Tests that are + // expected to pass with `acceptance_probability`. Will terminate if the + // resulting tolerance is zero (due to passing in 0.0 for + // `acceptance_probability` or rounding errors). + // + // For example, + // MaxErrorTolerance(0.001) = 0.0 + // MaxErrorTolerance(0.5) = ~0.47 + // MaxErrorTolerance(1.0) = inf + double MaxErrorTolerance(double acceptance_probability); + + // Approximation to inverse of the Error Function in double precision. + // (http://people.maths.ox.ac.uk/gilesm/files/gems_erfinv.pdf) + double erfinv(double x); + + // Beta(p, q) = Gamma(p) * Gamma(q) / Gamma(p+q) + double beta(double p, double q); + + // The inverse of the normal survival function. + double InverseNormalSurvival(double x); + + // Returns whether actual is "near" expected, based on the bound. + bool Near(absl::string_view msg, double actual, double expected, double bound); + + // Implements the incomplete regularized beta function, AS63, BETAIN. + // https://www.jstor.org/stable/2346797 + // + // BetaIncomplete(x, p, q), where + // `x` is the value of the upper limit + // `p` is beta parameter p, `q` is beta parameter q. + // + // NOTE: This is a test-only function which is only accurate to within, at most, + // 1e-13 of the actual value. + // + double BetaIncomplete(double x, double p, double q); + + // Implements the inverse of the incomplete regularized beta function, AS109, + // XINBTA. + // https://www.jstor.org/stable/2346798 + // https://www.jstor.org/stable/2346887 + // + // BetaIncompleteInv(p, q, beta, alhpa) + // `p` is beta parameter p, `q` is beta parameter q. + // `alpha` is the value of the lower tail area. 
+ // + // NOTE: This is a test-only function and, when successful, is only accurate to + // within ~1e-6 of the actual value; there are some cases where it diverges from + // the actual value by much more than that. The function uses Newton's method, + // and thus the runtime is highly variable. + double BetaIncompleteInv(double p, double q, double alpha); + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_DISTRIBUTION_TEST_UTIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/explicit_seed_seq.h b/CAPI/cpp/grpc/include/absl/random/internal/explicit_seed_seq.h new file mode 100644 index 00000000..5ba7c4de --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/explicit_seed_seq.h @@ -0,0 +1,109 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_EXPLICIT_SEED_SEQ_H_ +#define ABSL_RANDOM_INTERNAL_EXPLICIT_SEED_SEQ_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // This class conforms to the C++ Standard "Seed Sequence" concept + // [rand.req.seedseq]. 
+ // + // An "ExplicitSeedSeq" is meant to provide a conformant interface for + // forwarding pre-computed seed material to the constructor of a class + // conforming to the "Uniform Random Bit Generator" concept. This class makes no + // attempt to mutate the state provided by its constructor, and returns it + // directly via ExplicitSeedSeq::generate(). + // + // If this class is asked to generate more seed material than was provided to + // the constructor, then the remaining bytes will be filled with deterministic, + // nonrandom data. + class ExplicitSeedSeq + { + public: + using result_type = uint32_t; + + ExplicitSeedSeq() : + state_() + { + } + + // Copy and move both allowed. + ExplicitSeedSeq(const ExplicitSeedSeq& other) = default; + ExplicitSeedSeq& operator=(const ExplicitSeedSeq& other) = default; + ExplicitSeedSeq(ExplicitSeedSeq&& other) = default; + ExplicitSeedSeq& operator=(ExplicitSeedSeq&& other) = default; + + template + ExplicitSeedSeq(Iterator begin, Iterator end) + { + for (auto it = begin; it != end; it++) + { + state_.push_back(*it & 0xffffffff); + } + } + + template + ExplicitSeedSeq(std::initializer_list il) : + ExplicitSeedSeq(il.begin(), il.end()) + { + } + + size_t size() const + { + return state_.size(); + } + + template + void param(OutIterator out) const + { + std::copy(std::begin(state_), std::end(state_), out); + } + + template + void generate(OutIterator begin, OutIterator end) + { + for (size_t index = 0; begin != end; begin++) + { + *begin = state_.empty() ? 
0 : state_[index++]; + if (index >= state_.size()) + { + index = 0; + } + } + } + + protected: + std::vector state_; + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_EXPLICIT_SEED_SEQ_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/fast_uniform_bits.h b/CAPI/cpp/grpc/include/absl/random/internal/fast_uniform_bits.h new file mode 100644 index 00000000..1135bc1f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/fast_uniform_bits.h @@ -0,0 +1,285 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_FAST_UNIFORM_BITS_H_ +#define ABSL_RANDOM_INTERNAL_FAST_UNIFORM_BITS_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/meta/type_traits.h" +#include "absl/random/internal/traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + // Returns true if the input value is zero or a power of two. Useful for + // determining if the range of output values in a URBG + template + constexpr bool IsPowerOfTwoOrZero(UIntType n) + { + return (n == 0) || ((n & (n - 1)) == 0); + } + + // Computes the length of the range of values producible by the URBG, or returns + // zero if that would encompass the entire range of representable values in + // URBG::result_type. 
+ template + constexpr typename URBG::result_type RangeSize() + { + using result_type = typename URBG::result_type; + static_assert((URBG::max)() != (URBG::min)(), "URBG range cannot be 0."); + return ((URBG::max)() == (std::numeric_limits::max)() && + (URBG::min)() == std::numeric_limits::lowest()) ? + result_type{0} : + ((URBG::max)() - (URBG::min)() + result_type{1}); + } + + // Computes the floor of the log. (i.e., std::floor(std::log2(N)); + template + constexpr UIntType IntegerLog2(UIntType n) + { + return (n <= 1) ? 0 : 1 + IntegerLog2(n >> 1); + } + + // Returns the number of bits of randomness returned through + // `PowerOfTwoVariate(urbg)`. + template + constexpr size_t NumBits() + { + return static_cast( + RangeSize() == 0 ? std::numeric_limits::digits : IntegerLog2(RangeSize()) + ); + } + + // Given a shift value `n`, constructs a mask with exactly the low `n` bits set. + // If `n == 0`, all bits are set. + template + constexpr UIntType MaskFromShift(size_t n) + { + return ((n % std::numeric_limits::digits) == 0) ? ~UIntType{0} : (UIntType{1} << n) - UIntType{1}; + } + + // Tags used to dispatch FastUniformBits::generate to the simple or more complex + // entropy extraction algorithm. + struct SimplifiedLoopTag + { + }; + struct RejectionLoopTag + { + }; + + // FastUniformBits implements a fast path to acquire uniform independent bits + // from a type which conforms to the [rand.req.urbg] concept. + // Parameterized by: + // `UIntType`: the result (output) type + // + // The std::independent_bits_engine [rand.adapt.ibits] adaptor can be + // instantiated from an existing generator through a copy or a move. It does + // not, however, facilitate the production of pseudorandom bits from an un-owned + // generator that will outlive the std::independent_bits_engine instance. 
+ template + class FastUniformBits + { + public: + using result_type = UIntType; + + static constexpr result_type(min)() + { + return 0; + } + static constexpr result_type(max)() + { + return (std::numeric_limits::max)(); + } + + template + result_type operator()(URBG& g); // NOLINT(runtime/references) + + private: + static_assert(IsUnsigned::value, "Class-template FastUniformBits<> must be parameterized using " + "an unsigned type."); + + // Generate() generates a random value, dispatched on whether + // the underlying URBG must use rejection sampling to generate a value, + // or whether a simplified loop will suffice. + template + result_type Generate(URBG& g, // NOLINT(runtime/references) + SimplifiedLoopTag); + + template + result_type Generate(URBG& g, // NOLINT(runtime/references) + RejectionLoopTag); + }; + + template + template + typename FastUniformBits::result_type + FastUniformBits::operator()(URBG& g) + { // NOLINT(runtime/references) + // kRangeMask is the mask used when sampling variates from the URBG when the + // width of the URBG range is not a power of 2. + // Y = (2 ^ kRange) - 1 + static_assert((URBG::max)() > (URBG::min)(), "URBG::max and URBG::min may not be equal."); + + using tag = absl::conditional_t()), SimplifiedLoopTag, RejectionLoopTag>; + return Generate(g, tag{}); + } + + template + template + typename FastUniformBits::result_type + FastUniformBits::Generate(URBG& g, // NOLINT(runtime/references) + SimplifiedLoopTag) + { + // The simplified version of FastUniformBits works only on URBGs that have + // a range that is a power of 2. In this case we simply loop and shift without + // attempting to balance the bits across calls. 
+ static_assert(IsPowerOfTwoOrZero(RangeSize()), "incorrect Generate tag for URBG instance"); + + static constexpr size_t kResultBits = + std::numeric_limits::digits; + static constexpr size_t kUrbgBits = NumBits(); + static constexpr size_t kIters = + (kResultBits / kUrbgBits) + (kResultBits % kUrbgBits != 0); + static constexpr size_t kShift = (kIters == 1) ? 0 : kUrbgBits; + static constexpr auto kMin = (URBG::min)(); + + result_type r = static_cast(g() - kMin); + for (size_t n = 1; n < kIters; ++n) + { + r = static_cast(r << kShift) + + static_cast(g() - kMin); + } + return r; + } + + template + template + typename FastUniformBits::result_type + FastUniformBits::Generate(URBG& g, // NOLINT(runtime/references) + RejectionLoopTag) + { + static_assert(!IsPowerOfTwoOrZero(RangeSize()), "incorrect Generate tag for URBG instance"); + using urbg_result_type = typename URBG::result_type; + + // See [rand.adapt.ibits] for more details on the constants calculated below. + // + // It is preferable to use roughly the same number of bits from each generator + // call, however this is only possible when the number of bits provided by the + // URBG is a divisor of the number of bits in `result_type`. In all other + // cases, the number of bits used cannot always be the same, but it can be + // guaranteed to be off by at most 1. Thus we run two loops, one with a + // smaller bit-width size (`kSmallWidth`) and one with a larger width size + // (satisfying `kLargeWidth == kSmallWidth + 1`). The loops are run + // `kSmallIters` and `kLargeIters` times respectively such + // that + // + // `kResultBits == kSmallIters * kSmallBits + // + kLargeIters * kLargeBits` + // + // where `kResultBits` is the total number of bits in `result_type`. 
+ // + static constexpr size_t kResultBits = + std::numeric_limits::digits; // w + static constexpr urbg_result_type kUrbgRange = RangeSize(); // R + static constexpr size_t kUrbgBits = NumBits(); // m + + // compute the initial estimate of the bits used. + // [rand.adapt.ibits] 2 (c) + static constexpr size_t kA = // ceil(w/m) + (kResultBits / kUrbgBits) + ((kResultBits % kUrbgBits) != 0); // n' + + static constexpr size_t kABits = kResultBits / kA; // w0' + static constexpr urbg_result_type kARejection = + ((kUrbgRange >> kABits) << kABits); // y0' + + // refine the selection to reduce the rejection frequency. + static constexpr size_t kTotalIters = + ((kUrbgRange - kARejection) <= (kARejection / kA)) ? kA : (kA + 1); // n + + // [rand.adapt.ibits] 2 (b) + static constexpr size_t kSmallIters = + kTotalIters - (kResultBits % kTotalIters); // n0 + static constexpr size_t kSmallBits = kResultBits / kTotalIters; // w0 + static constexpr urbg_result_type kSmallRejection = + ((kUrbgRange >> kSmallBits) << kSmallBits); // y0 + + static constexpr size_t kLargeBits = kSmallBits + 1; // w0+1 + static constexpr urbg_result_type kLargeRejection = + ((kUrbgRange >> kLargeBits) << kLargeBits); // y1 + + // + // Because `kLargeBits == kSmallBits + 1`, it follows that + // + // `kResultBits == kSmallIters * kSmallBits + kLargeIters` + // + // and therefore + // + // `kLargeIters == kTotalWidth % kSmallWidth` + // + // Intuitively, each iteration with the large width accounts for one unit + // of the remainder when `kTotalWidth` is divided by `kSmallWidth`. As + // mentioned above, if the URBG width is a divisor of `kTotalWidth`, then + // there would be no need for any large iterations (i.e., one loop would + // suffice), and indeed, in this case, `kLargeIters` would be zero. 
+ static_assert(kResultBits == kSmallIters * kSmallBits + (kTotalIters - kSmallIters) * kLargeBits, "Error in looping constant calculations."); + + // The small shift is essentially small bits, but due to the potential + // of generating a smaller result_type from a larger urbg type, the actual + // shift might be 0. + static constexpr size_t kSmallShift = kSmallBits % kResultBits; + static constexpr auto kSmallMask = + MaskFromShift(kSmallShift); + static constexpr size_t kLargeShift = kLargeBits % kResultBits; + static constexpr auto kLargeMask = + MaskFromShift(kLargeShift); + + static constexpr auto kMin = (URBG::min)(); + + result_type s = 0; + for (size_t n = 0; n < kSmallIters; ++n) + { + urbg_result_type v; + do + { + v = g() - kMin; + } while (v >= kSmallRejection); + + s = (s << kSmallShift) + static_cast(v & kSmallMask); + } + + for (size_t n = kSmallIters; n < kTotalIters; ++n) + { + urbg_result_type v; + do + { + v = g() - kMin; + } while (v >= kLargeRejection); + + s = (s << kLargeShift) + static_cast(v & kLargeMask); + } + return s; + } + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_FAST_UNIFORM_BITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/fastmath.h b/CAPI/cpp/grpc/include/absl/random/internal/fastmath.h new file mode 100644 index 00000000..a14ad444 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/fastmath.h @@ -0,0 +1,62 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_FASTMATH_H_ +#define ABSL_RANDOM_INTERNAL_FASTMATH_H_ + +// This file contains fast math functions (bitwise ops as well as some others) +// which are implementation details of various absl random number distributions. + +#include +#include +#include + +#include "absl/numeric/bits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // Compute log2(n) using integer operations. + // While std::log2 is more accurate than std::log(n) / std::log(2), for + // very large numbers--those close to std::numeric_limits::max() - 2, + // for instance--std::log2 rounds up rather than down, which introduces + // definite skew in the results. + inline int IntLog2Floor(uint64_t n) + { + return (n <= 1) ? 0 : (63 - countl_zero(n)); + } + inline int IntLog2Ceil(uint64_t n) + { + return (n <= 1) ? 0 : (64 - countl_zero(n - 1)); + } + + inline double StirlingLogFactorial(double n) + { + assert(n >= 1); + // Using Stirling's approximation. + constexpr double kLog2PI = 1.83787706640934548356; + const double logn = std::log(n); + const double ninv = 1.0 / static_cast(n); + return n * logn - n + 0.5 * (kLog2PI + logn) + (1.0 / 12.0) * ninv - + (1.0 / 360.0) * ninv * ninv * ninv; + } + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_FASTMATH_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/generate_real.h b/CAPI/cpp/grpc/include/absl/random/internal/generate_real.h new file mode 100644 index 00000000..1f50e221 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/generate_real.h @@ -0,0 +1,149 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_GENERATE_REAL_H_ +#define ABSL_RANDOM_INTERNAL_GENERATE_REAL_H_ + +// This file contains some implementation details which are used by one or more +// of the absl random number distributions. + +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/numeric/bits.h" +#include "absl/random/internal/fastmath.h" +#include "absl/random/internal/traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // Tristate tag types controlling the output of GenerateRealFromBits. + struct GeneratePositiveTag + { + }; + struct GenerateNegativeTag + { + }; + struct GenerateSignedTag + { + }; + + // GenerateRealFromBits generates a single real value from a single 64-bit + // `bits` with template fields controlling the output. + // + // The `SignedTag` parameter controls whether positive, negative, + // or either signed/unsigned may be returned. + // When SignedTag == GeneratePositiveTag, range is U(0, 1) + // When SignedTag == GenerateNegativeTag, range is U(-1, 0) + // When SignedTag == GenerateSignedTag, range is U(-1, 1) + // + // When the `IncludeZero` parameter is true, the function may return 0 for some + // inputs, otherwise it never returns 0. + // + // When a value in U(0,1) is required, use: + // GenerateRealFromBits; + // + // When a value in U(-1,1) is required, use: + // GenerateRealFromBits; + // + // This generates more distinct values than the mathematical equivalent + // `U(0, 1) * 2.0 - 1.0`. 
+ // + // Scaling the result by powers of 2 (and avoiding a multiply) is also possible: + // GenerateRealFromBits(..., -1); => U(0, 0.5) + // GenerateRealFromBits(..., 1); => U(0, 2) + // + template + inline RealType GenerateRealFromBits(uint64_t bits, int exp_bias = 0) + { + using real_type = RealType; + using uint_type = absl::conditional_t::value, uint32_t, uint64_t>; + + static_assert( + (std::is_same::value || + std::is_same::value), + "GenerateRealFromBits must be parameterized by either float or double." + ); + + static_assert(sizeof(uint_type) == sizeof(real_type), "Mismatched unsigned and real types."); + + static_assert((std::numeric_limits::is_iec559 && std::numeric_limits::radix == 2), "RealType representation is not IEEE 754 binary."); + + static_assert((std::is_same::value || std::is_same::value || std::is_same::value), ""); + + static constexpr int kExp = std::numeric_limits::digits - 1; + static constexpr uint_type kMask = (static_cast(1) << kExp) - 1u; + static constexpr int kUintBits = sizeof(uint_type) * 8; + + int exp = exp_bias + int{std::numeric_limits::max_exponent - 2}; + + // Determine the sign bit. + // Depending on the SignedTag, this may use the left-most bit + // or it may be a constant value. + uint_type sign = std::is_same::value ? (static_cast(1) << (kUintBits - 1)) : 0; + if (std::is_same::value) + { + if (std::is_same::value) + { + sign = bits & uint64_t{0x8000000000000000}; + } + if (std::is_same::value) + { + const uint64_t tmp = bits & uint64_t{0x8000000000000000}; + sign = static_cast(tmp >> 32); + } + // adjust the bits and the exponent to account for removing + // the leading bit. + bits = bits & uint64_t{0x7FFFFFFFFFFFFFFF}; + exp++; + } + if (IncludeZero) + { + if (bits == 0u) + return 0; + } + + // Number of leading zeros is mapped to the exponent: 2^-clz + // bits is 0..01xxxxxx. After shifting, we're left with 1xxx...0..0 + int clz = countl_zero(bits); + bits <<= (IncludeZero ? clz : (clz & 63)); // remove 0-bits. 
+ exp -= clz; // set the exponent. + bits >>= (63 - kExp); + + // Construct the 32-bit or 64-bit IEEE 754 floating-point value from + // the individual fields: sign, exp, mantissa(bits). + uint_type val = sign | (static_cast(exp) << kExp) | + (static_cast(bits) & kMask); + + // bit_cast to the output-type + real_type result; + memcpy(static_cast(&result), static_cast(&val), sizeof(result)); + return result; + } + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_GENERATE_REAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/iostream_state_saver.h b/CAPI/cpp/grpc/include/absl/random/internal/iostream_state_saver.h new file mode 100644 index 00000000..06e14722 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/iostream_state_saver.h @@ -0,0 +1,280 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_IOSTREAM_STATE_SAVER_H_ +#define ABSL_RANDOM_INTERNAL_IOSTREAM_STATE_SAVER_H_ + +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/numeric/int128.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // The null_state_saver does nothing. 
+ template + class null_state_saver + { + public: + using stream_type = T; + using flags_type = std::ios_base::fmtflags; + + null_state_saver(T&, flags_type) + { + } + ~null_state_saver() + { + } + }; + + // ostream_state_saver is a RAII object to save and restore the common + // basic_ostream flags used when implementing `operator <<()` on any of + // the absl random distributions. + template + class ostream_state_saver + { + public: + using ostream_type = OStream; + using flags_type = std::ios_base::fmtflags; + using fill_type = typename ostream_type::char_type; + using precision_type = std::streamsize; + + ostream_state_saver(ostream_type& os, // NOLINT(runtime/references) + flags_type flags, + fill_type fill) : + os_(os), + flags_(os.flags(flags)), + fill_(os.fill(fill)), + precision_(os.precision()) + { + // Save state in initialized variables. + } + + ~ostream_state_saver() + { + // Restore saved state. + os_.precision(precision_); + os_.fill(fill_); + os_.flags(flags_); + } + + private: + ostream_type& os_; + const flags_type flags_; + const fill_type fill_; + const precision_type precision_; + }; + +#if defined(__NDK_MAJOR__) && __NDK_MAJOR__ < 16 +#define ABSL_RANDOM_INTERNAL_IOSTREAM_HEXFLOAT 1 +#else +#define ABSL_RANDOM_INTERNAL_IOSTREAM_HEXFLOAT 0 +#endif + + template + ostream_state_saver> make_ostream_state_saver( + std::basic_ostream& os, // NOLINT(runtime/references) + std::ios_base::fmtflags flags = std::ios_base::dec | std::ios_base::left | +#if ABSL_RANDOM_INTERNAL_IOSTREAM_HEXFLOAT + std::ios_base::fixed | +#endif + std::ios_base::scientific + ) + { + using result_type = ostream_state_saver>; + return result_type(os, flags, os.widen(' ')); + } + + template + typename absl::enable_if_t::value, + null_state_saver> + make_ostream_state_saver(T& is, // NOLINT(runtime/references) + std::ios_base::fmtflags flags = std::ios_base::dec) + { + std::cerr << "null_state_saver"; + using result_type = null_state_saver; + return result_type(is, flags); + } + 
+ // stream_precision_helper::kPrecision returns the base 10 precision + // required to stream and reconstruct a real type exact binary value through + // a binary->decimal->binary transition. + template + struct stream_precision_helper + { + // max_digits10 may be 0 on MSVC; if so, use digits10 + 3. + static constexpr int kPrecision = + (std::numeric_limits::max_digits10 > std::numeric_limits::digits10) ? std::numeric_limits::max_digits10 : (std::numeric_limits::digits10 + 3); + }; + + template<> + struct stream_precision_helper + { + static constexpr int kPrecision = 9; + }; + template<> + struct stream_precision_helper + { + static constexpr int kPrecision = 17; + }; + template<> + struct stream_precision_helper + { + static constexpr int kPrecision = 36; // assuming fp128 + }; + + // istream_state_saver is a RAII object to save and restore the common + // std::basic_istream<> flags used when implementing `operator >>()` on any of + // the absl random distributions. + template + class istream_state_saver + { + public: + using istream_type = IStream; + using flags_type = std::ios_base::fmtflags; + + istream_state_saver(istream_type& is, // NOLINT(runtime/references) + flags_type flags) : + is_(is), + flags_(is.flags(flags)) + { + } + + ~istream_state_saver() + { + is_.flags(flags_); + } + + private: + istream_type& is_; + flags_type flags_; + }; + + template + istream_state_saver> make_istream_state_saver( + std::basic_istream& is, // NOLINT(runtime/references) + std::ios_base::fmtflags flags = std::ios_base::dec | + std::ios_base::scientific | + std::ios_base::skipws + ) + { + using result_type = istream_state_saver>; + return result_type(is, flags); + } + + template + typename absl::enable_if_t::value, + null_state_saver> + make_istream_state_saver(T& is, // NOLINT(runtime/references) + std::ios_base::fmtflags flags = std::ios_base::dec) + { + using result_type = null_state_saver; + return result_type(is, flags); + } + + // stream_format_type is a helper struct 
to convert types which + // basic_iostream cannot output as decimal numbers into types which + // basic_iostream can output as decimal numbers. Specifically: + // * signed/unsigned char-width types are converted to int. + // * TODO(lar): __int128 => uint128, except there is no operator << yet. + // + template + struct stream_format_type : public std::conditional<(sizeof(T) == sizeof(char)), int, T> + { + }; + + // stream_u128_helper allows us to write out either absl::uint128 or + // __uint128_t types in the same way, which enables their use as internal + // state of PRNG engines. + template + struct stream_u128_helper; + + template<> + struct stream_u128_helper + { + template + inline absl::uint128 read(IStream& in) + { + uint64_t h = 0; + uint64_t l = 0; + in >> h >> l; + return absl::MakeUint128(h, l); + } + + template + inline void write(absl::uint128 val, OStream& out) + { + uint64_t h = absl::Uint128High64(val); + uint64_t l = absl::Uint128Low64(val); + out << h << out.fill() << l; + } + }; + +#ifdef ABSL_HAVE_INTRINSIC_INT128 + template<> + struct stream_u128_helper<__uint128_t> + { + template + inline __uint128_t read(IStream& in) + { + uint64_t h = 0; + uint64_t l = 0; + in >> h >> l; + return (static_cast<__uint128_t>(h) << 64) | l; + } + + template + inline void write(__uint128_t val, OStream& out) + { + uint64_t h = static_cast(val >> 64u); + uint64_t l = static_cast(val); + out << h << out.fill() << l; + } + }; +#endif + + template + inline FloatType read_floating_point(IStream& is) + { + static_assert(std::is_floating_point::value, ""); + FloatType dest; + is >> dest; + // Parsing a double value may report a subnormal value as an error + // despite being able to represent it. + // See https://stackoverflow.com/q/52410931/3286653 + // It may also report an underflow when parsing DOUBLE_MIN as an + // ERANGE error, as the parsed value may be smaller than DOUBLE_MIN + // and rounded up. 
+ // See: https://stackoverflow.com/q/42005462 + if (is.fail() && + (std::fabs(dest) == (std::numeric_limits::min)() || + std::fpclassify(dest) == FP_SUBNORMAL)) + { + is.clear(is.rdstate() & (~std::ios_base::failbit)); + } + return dest; + } + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_IOSTREAM_STATE_SAVER_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/mock_helpers.h b/CAPI/cpp/grpc/include/absl/random/internal/mock_helpers.h new file mode 100644 index 00000000..cd5c5806 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/mock_helpers.h @@ -0,0 +1,142 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_ +#define ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_ + +#include +#include +#include + +#include "absl/base/internal/fast_type_id.h" +#include "absl/types/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // MockHelpers works in conjunction with MockOverloadSet, MockingBitGen, and + // BitGenRef to enable the mocking capability for absl distribution functions. + // + // MockingBitGen registers mocks based on the typeid of a mock signature, KeyT, + // which is used to generate a unique id. 
+ // + // KeyT is a signature of the form: + // result_type(discriminator_type, std::tuple) + // The mocked function signature will be composed from KeyT as: + // result_type(args...) + // + class MockHelpers + { + using IdType = ::absl::base_internal::FastTypeIdType; + + // Given a key signature type used to index the mock, extract the components. + // KeyT is expected to have the form: + // result_type(discriminator_type, arg_tuple_type) + template + struct KeySignature; + + template + struct KeySignature + { + using result_type = ResultT; + using discriminator_type = DiscriminatorT; + using arg_tuple_type = ArgTupleT; + }; + + // Detector for InvokeMock. + template + using invoke_mock_t = decltype(std::declval()->InvokeMock( + std::declval(), std::declval(), std::declval() + )); + + // Empty implementation of InvokeMock. + template + static absl::optional InvokeMockImpl(char, URBG*, Args&&...) + { + return absl::nullopt; + } + + // Non-empty implementation of InvokeMock. + template, typename... Args> + static absl::optional InvokeMockImpl(int, URBG* urbg, Args&&... args) + { + ArgTupleT arg_tuple(std::forward(args)...); + ReturnT result; + if (urbg->InvokeMock(::absl::base_internal::FastTypeId(), &arg_tuple, &result)) + { + return result; + } + return absl::nullopt; + } + + public: + // InvokeMock is private; this provides access for some specialized use cases. + template + static inline bool PrivateInvokeMock(URBG* urbg, IdType type, void* args_tuple, void* result) + { + return urbg->InvokeMock(type, args_tuple, result); + } + + // Invoke a mock for the KeyT (may or may not be a signature). + // + // KeyT is used to generate a typeid-based lookup key for the mock. + // KeyT is a signature of the form: + // result_type(discriminator_type, std::tuple) + // The mocked function signature will be composed from KeyT as: + // result_type(args...) 
+ // + // An instance of arg_tuple_type must be constructable from Args..., since + // the underlying mechanism requires a pointer to an argument tuple. + template + static auto MaybeInvokeMock(URBG* urbg, Args&&... args) + -> absl::optional::result_type> + { + // Use function overloading to dispatch to the implementation since + // more modern patterns (e.g. require + constexpr) are not supported in all + // compiler configurations. + return InvokeMockImpl::result_type, typename KeySignature::arg_tuple_type, URBG>( + 0, urbg, std::forward(args)... + ); + } + + // Acquire a mock for the KeyT (may or may not be a signature). + // + // KeyT is used to generate a typeid-based lookup for the mock. + // KeyT is a signature of the form: + // result_type(discriminator_type, std::tuple) + // The mocked function signature will be composed from KeyT as: + // result_type(args...) + template + static auto MockFor(MockURBG& m) + -> decltype(m.template RegisterMock< + typename KeySignature::result_type, + typename KeySignature::arg_tuple_type>( + m, std::declval() + )) + { + return m.template RegisterMock::result_type, typename KeySignature::arg_tuple_type>( + m, ::absl::base_internal::FastTypeId() + ); + } + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/mock_overload_set.h b/CAPI/cpp/grpc/include/absl/random/internal/mock_overload_set.h new file mode 100644 index 00000000..578f65cd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/mock_overload_set.h @@ -0,0 +1,100 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_MOCK_OVERLOAD_SET_H_ +#define ABSL_RANDOM_INTERNAL_MOCK_OVERLOAD_SET_H_ + +#include + +#include "gmock/gmock.h" +#include "absl/random/internal/mock_helpers.h" +#include "absl/random/mocking_bit_gen.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + template + struct MockSingleOverload; + + // MockSingleOverload + // + // MockSingleOverload hooks in to gMock's `ON_CALL` and `EXPECT_CALL` macros. + // EXPECT_CALL(mock_single_overload, Call(...))` will expand to a call to + // `mock_single_overload.gmock_Call(...)`. Because expectations are stored on + // the MockingBitGen (an argument passed inside `Call(...)`), this forwards to + // arguments to MockingBitGen::Register. + // + // The underlying KeyT must match the KeyT constructed by DistributionCaller. + template + struct MockSingleOverload + { + static_assert(std::is_same::value, "Overload signature must have return type matching the " + "distribution result_type."); + using KeyT = Ret(DistrT, std::tuple); + + template + auto gmock_Call(MockURBG& gen, const ::testing::Matcher&... 
matchers) + -> decltype(MockHelpers::MockFor(gen).gmock_Call(matchers...)) + { + static_assert(std::is_base_of::value, "Mocking requires an absl::MockingBitGen"); + return MockHelpers::MockFor(gen).gmock_Call(matchers...); + } + }; + + template + struct MockSingleOverload + { + static_assert(std::is_same::value, "Overload signature must have return type matching the " + "distribution result_type."); + using KeyT = Ret(DistrT, std::tuple); + + template + auto gmock_Call(const ::testing::Matcher& matcher, MockURBG& gen, const ::testing::Matcher&... matchers) + -> decltype(MockHelpers::MockFor(gen).gmock_Call(matcher, matchers...)) + { + static_assert(std::is_base_of::value, "Mocking requires an absl::MockingBitGen"); + return MockHelpers::MockFor(gen).gmock_Call(matcher, matchers...); + } + }; + + // MockOverloadSet + // + // MockOverloadSet takes a distribution and a collection of signatures and + // performs overload resolution amongst all the overloads. This makes + // `EXPECT_CALL(mock_overload_set, Call(...))` expand and do overload resolution + // correctly. + template + struct MockOverloadSet; + + template + struct MockOverloadSet : public MockSingleOverload + { + using MockSingleOverload::gmock_Call; + }; + + template + struct MockOverloadSet : public MockSingleOverload, public MockOverloadSet + { + using MockSingleOverload::gmock_Call; + using MockOverloadSet::gmock_Call; + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl +#endif // ABSL_RANDOM_INTERNAL_MOCK_OVERLOAD_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/nanobenchmark.h b/CAPI/cpp/grpc/include/absl/random/internal/nanobenchmark.h new file mode 100644 index 00000000..44ab0903 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/nanobenchmark.h @@ -0,0 +1,171 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_NANOBENCHMARK_H_ +#define ABSL_RANDOM_INTERNAL_NANOBENCHMARK_H_ + +// Benchmarks functions of a single integer argument with realistic branch +// prediction hit rates. Uses a robust estimator to summarize the measurements. +// The precision is about 0.2%. +// +// Examples: see nanobenchmark_test.cc. +// +// Background: Microbenchmarks such as http://github.com/google/benchmark +// can measure elapsed times on the order of a microsecond. Shorter functions +// are typically measured by repeating them thousands of times and dividing +// the total elapsed time by this count. Unfortunately, repetition (especially +// with the same input parameter!) influences the runtime. In time-critical +// code, it is reasonable to expect warm instruction/data caches and TLBs, +// but a perfect record of which branches will be taken is unrealistic. +// Unless the application also repeatedly invokes the measured function with +// the same parameter, the benchmark is measuring something very different - +// a best-case result, almost as if the parameter were made a compile-time +// constant. This may lead to erroneous conclusions about branch-heavy +// algorithms outperforming branch-free alternatives. +// +// Our approach differs in three ways. Adding fences to the timer functions +// reduces variability due to instruction reordering, improving the timer +// resolution to about 40 CPU cycles. 
However, shorter functions must still +// be invoked repeatedly. For more realistic branch prediction performance, +// we vary the input parameter according to a user-specified distribution. +// Thus, instead of VaryInputs(Measure(Repeat(func))), we change the +// loop nesting to Measure(Repeat(VaryInputs(func))). We also estimate the +// central tendency of the measurement samples with the "half sample mode", +// which is more robust to outliers and skewed data than the mean or median. + +// NOTE: for compatibility with multiple translation units compiled with +// distinct flags, avoid #including headers that define functions. + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal_nanobenchmark + { + + // Input influencing the function being measured (e.g. number of bytes to copy). + using FuncInput = size_t; + + // "Proof of work" returned by Func to ensure the compiler does not elide it. + using FuncOutput = uint64_t; + + // Function to measure: either 1) a captureless lambda or function with two + // arguments or 2) a lambda with capture, in which case the first argument + // is reserved for use by MeasureClosure. + using Func = FuncOutput (*)(const void*, FuncInput); + + // Internal parameters that determine precision/resolution/measuring time. + struct Params + { + // For measuring timer overhead/resolution. Used in a nested loop => + // quadratic time, acceptable because we know timer overhead is "low". + // constexpr because this is used to define array bounds. + static constexpr size_t kTimerSamples = 256; + + // Best-case precision, expressed as a divisor of the timer resolution. + // Larger => more calls to Func and higher precision. + size_t precision_divisor = 1024; + + // Ratio between full and subset input distribution sizes. Cannot be less + // than 2; larger values increase measurement time but more faithfully + // model the given input distribution. 
+ size_t subset_ratio = 2; + + // Together with the estimated Func duration, determines how many times to + // call Func before checking the sample variability. Larger values increase + // measurement time, memory/cache use and precision. + double seconds_per_eval = 4E-3; + + // The minimum number of samples before estimating the central tendency. + size_t min_samples_per_eval = 7; + + // The mode is better than median for estimating the central tendency of + // skewed/fat-tailed distributions, but it requires sufficient samples + // relative to the width of half-ranges. + size_t min_mode_samples = 64; + + // Maximum permissible variability (= median absolute deviation / center). + double target_rel_mad = 0.002; + + // Abort after this many evals without reaching target_rel_mad. This + // prevents infinite loops. + size_t max_evals = 9; + + // Retry the measure loop up to this many times. + size_t max_measure_retries = 2; + + // Whether to print additional statistics to stdout. + bool verbose = true; + }; + + // Measurement result for each unique input. + struct Result + { + FuncInput input; + + // Robust estimate (mode or median) of duration. + float ticks; + + // Measure of variability (median absolute deviation relative to "ticks"). + float variability; + }; + + // Ensures the thread is running on the specified cpu, and no others. + // Reduces noise due to desynchronized socket RDTSC and context switches. + // If "cpu" is negative, pin to the currently running core. + void PinThreadToCPU(const int cpu = -1); + + // Returns tick rate, useful for converting measurements to seconds. Invariant + // means the tick counter frequency is independent of CPU throttling or sleep. + // This call may be expensive, callers should cache the result. + double InvariantTicksPerSecond(); + + // Precisely measures the number of ticks elapsed when calling "func" with the + // given inputs, shuffled to ensure realistic branch prediction hit rates. 
+ // + // "func" returns a 'proof of work' to ensure its computations are not elided. + // "arg" is passed to Func, or reserved for internal use by MeasureClosure. + // "inputs" is an array of "num_inputs" (not necessarily unique) arguments to + // "func". The values should be chosen to maximize coverage of "func". This + // represents a distribution, so a value's frequency should reflect its + // probability in the real application. Order does not matter; for example, a + // uniform distribution over [0, 4) could be represented as {3,0,2,1}. + // Returns how many Result were written to "results": one per unique input, or + // zero if the measurement failed (an error message goes to stderr). + size_t Measure(const Func func, const void* arg, const FuncInput* inputs, const size_t num_inputs, Result* results, const Params& p = Params()); + + // Calls operator() of the given closure (lambda function). + template + static FuncOutput CallClosure(const void* f, const FuncInput input) + { + return (*reinterpret_cast(f))(input); + } + + // Same as Measure, except "closure" is typically a lambda function of + // FuncInput -> FuncOutput with a capture list. + template + static inline size_t MeasureClosure(const Closure& closure, const FuncInput* inputs, const size_t num_inputs, Result* results, const Params& p = Params()) + { + return Measure(reinterpret_cast(&CallClosure), reinterpret_cast(&closure), inputs, num_inputs, results, p); + } + + } // namespace random_internal_nanobenchmark + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_NANOBENCHMARK_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/nonsecure_base.h b/CAPI/cpp/grpc/include/absl/random/internal/nonsecure_base.h new file mode 100644 index 00000000..2c9e3b5b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/nonsecure_base.h @@ -0,0 +1,196 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_ +#define ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/container/inlined_vector.h" +#include "absl/meta/type_traits.h" +#include "absl/random/internal/pool_urbg.h" +#include "absl/random/internal/salted_seed_seq.h" +#include "absl/random/internal/seed_material.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // RandenPoolSeedSeq is a custom seed sequence type where generate() fills the + // provided buffer via the RandenPool entropy source. + class RandenPoolSeedSeq + { + private: + struct ContiguousTag + { + }; + struct BufferTag + { + }; + + // Generate random unsigned values directly into the buffer. + template + void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) + { + const size_t n = static_cast(std::distance(begin, end)); + auto* a = &(*begin); + RandenPool::Fill( + absl::MakeSpan(reinterpret_cast(a), sizeof(*a) * n) + ); + } + + // Construct a buffer of size n and fill it with values, then copy + // those values into the seed iterators. 
+ template + void generate_impl(BufferTag, RandomAccessIterator begin, RandomAccessIterator end) + { + const size_t n = std::distance(begin, end); + absl::InlinedVector data(n, 0); + RandenPool::Fill(absl::MakeSpan(data.begin(), data.end())); + std::copy(std::begin(data), std::end(data), begin); + } + + public: + using result_type = uint32_t; + + size_t size() + { + return 0; + } + + template + void param(OutIterator) const + { + } + + template + void generate(RandomAccessIterator begin, RandomAccessIterator end) + { + // RandomAccessIterator must be assignable from uint32_t + if (begin != end) + { + using U = typename std::iterator_traits::value_type; + // ContiguousTag indicates the common case of a known contiguous buffer, + // which allows directly filling the buffer. In C++20, + // std::contiguous_iterator_tag provides a mechanism for testing this + // capability, however until Abseil's support requirements allow us to + // assume C++20, limit checks to a few common cases. + using TagType = absl::conditional_t< + (std::is_pointer::value || + std::is_same::iterator>::value), + ContiguousTag, + BufferTag>; + + generate_impl(TagType{}, begin, end); + } + } + }; + + // Each instance of NonsecureURBGBase will be seeded by variates produced + // by a thread-unique URBG-instance. + template + class NonsecureURBGBase + { + public: + using result_type = typename URBG::result_type; + + // Default constructor + NonsecureURBGBase() : + urbg_(ConstructURBG()) + { + } + + // Copy disallowed, move allowed. 
+ NonsecureURBGBase(const NonsecureURBGBase&) = delete; + NonsecureURBGBase& operator=(const NonsecureURBGBase&) = delete; + NonsecureURBGBase(NonsecureURBGBase&&) = default; + NonsecureURBGBase& operator=(NonsecureURBGBase&&) = default; + + // Constructor using a seed + template::value>> + explicit NonsecureURBGBase(SSeq&& seq) : + urbg_(ConstructURBG(std::forward(seq))) + { + } + + // Note: on MSVC, min() or max() can be interpreted as MIN() or MAX(), so we + // enclose min() or max() in parens as (min)() and (max)(). + // Additionally, clang-format requires no space before this construction. + + // NonsecureURBGBase::min() + static constexpr result_type(min)() + { + return (URBG::min)(); + } + + // NonsecureURBGBase::max() + static constexpr result_type(max)() + { + return (URBG::max)(); + } + + // NonsecureURBGBase::operator()() + result_type operator()() + { + return urbg_(); + } + + // NonsecureURBGBase::discard() + void discard(unsigned long long values) + { // NOLINT(runtime/int) + urbg_.discard(values); + } + + bool operator==(const NonsecureURBGBase& other) const + { + return urbg_ == other.urbg_; + } + + bool operator!=(const NonsecureURBGBase& other) const + { + return !(urbg_ == other.urbg_); + } + + private: + static URBG ConstructURBG() + { + Seeder seeder; + return URBG(seeder); + } + + template + static URBG ConstructURBG(SSeq&& seq) + { // NOLINT(runtime/references) + auto salted_seq = + random_internal::MakeSaltedSeedSeq(std::forward(seq)); + return URBG(salted_seq); + } + + URBG urbg_; + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/pcg_engine.h b/CAPI/cpp/grpc/include/absl/random/internal/pcg_engine.h new file mode 100644 index 00000000..05dc9e58 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/pcg_engine.h @@ -0,0 +1,328 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_ +#define ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_ + +#include + +#include "absl/base/config.h" +#include "absl/meta/type_traits.h" +#include "absl/numeric/bits.h" +#include "absl/numeric/int128.h" +#include "absl/random/internal/fastmath.h" +#include "absl/random/internal/iostream_state_saver.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // pcg_engine is a simplified implementation of Melissa O'Neil's PCG engine in + // C++. PCG combines a linear congruential generator (LCG) with output state + // mixing functions to generate each random variate. pcg_engine supports only a + // single sequence (oneseq), and does not support streams. + // + // pcg_engine is parameterized by two types: + // Params, which provides the multiplier and increment values; + // Mix, which mixes the state into the result. 
+ // + template + class pcg_engine + { + static_assert(std::is_same::value, "Class-template absl::pcg_engine must be parameterized by " + "Params and Mix with identical state_type"); + + static_assert(std::is_unsigned::value, "Class-template absl::pcg_engine must be parameterized by " + "an unsigned Mix::result_type"); + + using params_type = Params; + using mix_type = Mix; + using state_type = typename Mix::state_type; + + public: + // C++11 URBG interface: + using result_type = typename Mix::result_type; + + static constexpr result_type(min)() + { + return (std::numeric_limits::min)(); + } + + static constexpr result_type(max)() + { + return (std::numeric_limits::max)(); + } + + explicit pcg_engine(uint64_t seed_value = 0) + { + seed(seed_value); + } + + template::value>> + explicit pcg_engine(SeedSequence&& seq) + { + seed(seq); + } + + pcg_engine(const pcg_engine&) = default; + pcg_engine& operator=(const pcg_engine&) = default; + pcg_engine(pcg_engine&&) = default; + pcg_engine& operator=(pcg_engine&&) = default; + + result_type operator()() + { + // Advance the LCG state, always using the new value to generate the output. 
+ state_ = lcg(state_); + return Mix{}(state_); + } + + void seed(uint64_t seed_value = 0) + { + state_type tmp = seed_value; + state_ = lcg(tmp + Params::increment()); + } + + template + typename absl::enable_if_t< + !std::is_convertible::value, + void> + seed(SeedSequence&& seq) + { + reseed(seq); + } + + void discard(uint64_t count) + { + state_ = advance(state_, count); + } + + bool operator==(const pcg_engine& other) const + { + return state_ == other.state_; + } + + bool operator!=(const pcg_engine& other) const + { + return !(*this == other); + } + + template + friend typename absl::enable_if_t<(sizeof(state_type) == 16), std::basic_ostream&> + operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const pcg_engine& engine + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + random_internal::stream_u128_helper helper; + helper.write(pcg_engine::params_type::multiplier(), os); + os << os.fill(); + helper.write(pcg_engine::params_type::increment(), os); + os << os.fill(); + helper.write(engine.state_, os); + return os; + } + + template + friend typename absl::enable_if_t<(sizeof(state_type) <= 8), std::basic_ostream&> + operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const pcg_engine& engine + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + os << pcg_engine::params_type::multiplier() << os.fill(); + os << pcg_engine::params_type::increment() << os.fill(); + os << engine.state_; + return os; + } + + template + friend typename absl::enable_if_t<(sizeof(state_type) == 16), std::basic_istream&> + operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + pcg_engine& engine + ) + { // NOLINT(runtime/references) + random_internal::stream_u128_helper helper; + auto mult = helper.read(is); + auto inc = helper.read(is); + auto tmp = helper.read(is); + if (mult != pcg_engine::params_type::multiplier() || + inc != pcg_engine::params_type::increment()) + { + // signal failure by 
setting the failbit. + is.setstate(is.rdstate() | std::ios_base::failbit); + } + if (!is.fail()) + { + engine.state_ = tmp; + } + return is; + } + + template + friend typename absl::enable_if_t<(sizeof(state_type) <= 8), std::basic_istream&> + operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + pcg_engine& engine + ) + { // NOLINT(runtime/references) + state_type mult{}, inc{}, tmp{}; + is >> mult >> inc >> tmp; + if (mult != pcg_engine::params_type::multiplier() || + inc != pcg_engine::params_type::increment()) + { + // signal failure by setting the failbit. + is.setstate(is.rdstate() | std::ios_base::failbit); + } + if (!is.fail()) + { + engine.state_ = tmp; + } + return is; + } + + private: + state_type state_; + + // Returns the linear-congruential generator next state. + static inline constexpr state_type lcg(state_type s) + { + return s * Params::multiplier() + Params::increment(); + } + + // Returns the linear-congruential arbitrary seek state. + inline state_type advance(state_type s, uint64_t n) const + { + state_type mult = Params::multiplier(); + state_type inc = Params::increment(); + state_type m = 1; + state_type i = 0; + while (n > 0) + { + if (n & 1) + { + m *= mult; + i = i * mult + inc; + } + inc = (mult + 1) * inc; + mult *= mult; + n >>= 1; + } + return m * s + i; + } + + template + void reseed(SeedSequence& seq) + { + using sequence_result_type = typename SeedSequence::result_type; + constexpr size_t kBufferSize = + sizeof(state_type) / sizeof(sequence_result_type); + sequence_result_type buffer[kBufferSize]; + seq.generate(std::begin(buffer), std::end(buffer)); + // Convert the seed output to a single state value. + state_type tmp = buffer[0]; + for (size_t i = 1; i < kBufferSize; i++) + { + tmp <<= (sizeof(sequence_result_type) * 8); + tmp |= buffer[i]; + } + state_ = lcg(tmp + params_type::increment()); + } + }; + + // Parameterized implementation of the PCG 128-bit oneseq state. 
+ // This provides state_type, multiplier, and increment for pcg_engine. + template + class pcg128_params + { + public: + using state_type = absl::uint128; + static inline constexpr state_type multiplier() + { + return absl::MakeUint128(kMultA, kMultB); + } + static inline constexpr state_type increment() + { + return absl::MakeUint128(kIncA, kIncB); + } + }; + + // Implementation of the PCG xsl_rr_128_64 128-bit mixing function, which + // accepts an input of state_type and mixes it into an output of result_type. + struct pcg_xsl_rr_128_64 + { + using state_type = absl::uint128; + using result_type = uint64_t; + + inline uint64_t operator()(state_type state) + { + // This is equivalent to the xsl_rr_128_64 mixing function. + uint64_t rotate = static_cast(state >> 122u); + state ^= state >> 64; + uint64_t s = static_cast(state); + return rotr(s, static_cast(rotate)); + } + }; + + // Parameterized implementation of the PCG 64-bit oneseq state. + // This provides state_type, multiplier, and increment for pcg_engine. + template + class pcg64_params + { + public: + using state_type = uint64_t; + static inline constexpr state_type multiplier() + { + return kMult; + } + static inline constexpr state_type increment() + { + return kInc; + } + }; + + // Implementation of the PCG xsh_rr_64_32 64-bit mixing function, which accepts + // an input of state_type and mixes it into an output of result_type. + struct pcg_xsh_rr_64_32 + { + using state_type = uint64_t; + using result_type = uint32_t; + inline uint32_t operator()(uint64_t state) + { + return rotr(static_cast(((state >> 18) ^ state) >> 27), state >> 59); + } + }; + + // Stable pcg_engine implementations: + // This is a 64-bit generator using 128-bits of state. + // The output sequence is equivalent to Melissa O'Neil's pcg64_oneseq. 
+ using pcg64_2018_engine = pcg_engine< + random_internal::pcg128_params<0x2360ed051fc65da4ull, 0x4385df649fccf645ull, 0x5851f42d4c957f2d, 0x14057b7ef767814f>, + random_internal::pcg_xsl_rr_128_64>; + + // This is a 32-bit generator using 64-bits of state. + // This is equivalent to Melissa O'Neil's pcg32_oneseq. + using pcg32_2018_engine = pcg_engine< + random_internal::pcg64_params<0x5851f42d4c957f2dull, 0x14057b7ef767814full>, + random_internal::pcg_xsh_rr_64_32>; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/platform.h b/CAPI/cpp/grpc/include/absl/random/internal/platform.h new file mode 100644 index 00000000..d779f481 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/platform.h @@ -0,0 +1,171 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_PLATFORM_H_ +#define ABSL_RANDOM_INTERNAL_PLATFORM_H_ + +// HERMETIC NOTE: The randen_hwaes target must not introduce duplicate +// symbols from arbitrary system and other headers, since it may be built +// with different flags from other targets, using different levels of +// optimization, potentially introducing ODR violations. 
+ +// ----------------------------------------------------------------------------- +// Platform Feature Checks +// ----------------------------------------------------------------------------- + +// Currently supported operating systems and associated preprocessor +// symbols: +// +// Linux and Linux-derived __linux__ +// Android __ANDROID__ (implies __linux__) +// Linux (non-Android) __linux__ && !__ANDROID__ +// Darwin (macOS and iOS) __APPLE__ +// Akaros (http://akaros.org) __ros__ +// Windows _WIN32 +// NaCL __native_client__ +// AsmJS __asmjs__ +// WebAssembly __wasm__ +// Fuchsia __Fuchsia__ +// +// Note that since Android defines both __ANDROID__ and __linux__, one +// may probe for either Linux or Android by simply testing for __linux__. +// +// NOTE: For __APPLE__ platforms, we use #include +// to distinguish os variants. +// +// http://nadeausoftware.com/articles/2012/01/c_c_tip_how_use_compiler_predefined_macros_detect_operating_system + +#if defined(__APPLE__) +#include +#endif + +// ----------------------------------------------------------------------------- +// Architecture Checks +// ----------------------------------------------------------------------------- + +// These preprocessor directives are trying to determine CPU architecture, +// including necessary headers to support hardware AES. +// +// ABSL_ARCH_{X86/PPC/ARM} macros determine the platform. +#if defined(__x86_64__) || defined(__x86_64) || defined(_M_AMD64) || \ + defined(_M_X64) +#define ABSL_ARCH_X86_64 +#elif defined(__i386) || defined(_M_IX86) +#define ABSL_ARCH_X86_32 +#elif defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) +#define ABSL_ARCH_AARCH64 +#elif defined(__arm__) || defined(__ARMEL__) || defined(_M_ARM) +#define ABSL_ARCH_ARM +#elif defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \ + defined(__ppc__) || defined(__PPC__) +#define ABSL_ARCH_PPC +#else +// Unsupported architecture. 
+// * https://sourceforge.net/p/predef/wiki/Architectures/ +// * https://msdn.microsoft.com/en-us/library/b0084kay.aspx +// * for gcc, clang: "echo | gcc -E -dM -" +#endif + +// ----------------------------------------------------------------------------- +// Attribute Checks +// ----------------------------------------------------------------------------- + +// ABSL_RANDOM_INTERNAL_RESTRICT annotates whether pointers may be considered +// to be unaliased. +#if defined(__clang__) || defined(__GNUC__) +#define ABSL_RANDOM_INTERNAL_RESTRICT __restrict__ +#elif defined(_MSC_VER) +#define ABSL_RANDOM_INTERNAL_RESTRICT __restrict +#else +#define ABSL_RANDOM_INTERNAL_RESTRICT +#endif + +// ABSL_HAVE_ACCELERATED_AES indicates whether the currently active compiler +// flags (e.g. -maes) allow using hardware accelerated AES instructions, which +// implies us assuming that the target platform supports them. +#define ABSL_HAVE_ACCELERATED_AES 0 + +#if defined(ABSL_ARCH_X86_64) + +#if defined(__AES__) || defined(__AVX__) +#undef ABSL_HAVE_ACCELERATED_AES +#define ABSL_HAVE_ACCELERATED_AES 1 +#endif + +#elif defined(ABSL_ARCH_PPC) + +// Rely on VSX and CRYPTO extensions for vcipher on PowerPC. +#if (defined(__VEC__) || defined(__ALTIVEC__)) && defined(__VSX__) && \ + defined(__CRYPTO__) +#undef ABSL_HAVE_ACCELERATED_AES +#define ABSL_HAVE_ACCELERATED_AES 1 +#endif + +#elif defined(ABSL_ARCH_ARM) || defined(ABSL_ARCH_AARCH64) + +// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0053c/IHI0053C_acle_2_0.pdf +// Rely on NEON+CRYPTO extensions for ARM. +#if defined(__ARM_NEON) && defined(__ARM_FEATURE_CRYPTO) +#undef ABSL_HAVE_ACCELERATED_AES +#define ABSL_HAVE_ACCELERATED_AES 1 +#endif + +#endif + +// NaCl does not allow AES. 
+#if defined(__native_client__) +#undef ABSL_HAVE_ACCELERATED_AES +#define ABSL_HAVE_ACCELERATED_AES 0 +#endif + +// ABSL_RANDOM_INTERNAL_AES_DISPATCH indicates whether the currently active +// platform has, or should use run-time dispatch for selecting the +// accelerated Randen implementation. +#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 0 + +#if defined(ABSL_ARCH_X86_64) +// Dispatch is available on x86_64 +#undef ABSL_RANDOM_INTERNAL_AES_DISPATCH +#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 1 +#elif defined(__linux__) && defined(ABSL_ARCH_PPC) +// Or when running linux PPC +#undef ABSL_RANDOM_INTERNAL_AES_DISPATCH +#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 1 +#elif defined(__linux__) && defined(ABSL_ARCH_AARCH64) +// Or when running linux AArch64 +#undef ABSL_RANDOM_INTERNAL_AES_DISPATCH +#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 1 +#elif defined(__linux__) && defined(ABSL_ARCH_ARM) && (__ARM_ARCH >= 8) +// Or when running linux ARM v8 or higher. +// (This captures a lot of Android configurations.) +#undef ABSL_RANDOM_INTERNAL_AES_DISPATCH +#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 1 +#endif + +// NaCl does not allow dispatch. +#if defined(__native_client__) +#undef ABSL_RANDOM_INTERNAL_AES_DISPATCH +#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 0 +#endif + +// iOS does not support dispatch, even on x86, since applications +// should be bundled as fat binaries, with a different build tailored for +// each specific supported platform/architecture. 
+#if (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \ + (defined(TARGET_OS_IPHONE_SIMULATOR) && TARGET_OS_IPHONE_SIMULATOR) +#undef ABSL_RANDOM_INTERNAL_AES_DISPATCH +#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 0 +#endif + +#endif // ABSL_RANDOM_INTERNAL_PLATFORM_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/pool_urbg.h b/CAPI/cpp/grpc/include/absl/random/internal/pool_urbg.h new file mode 100644 index 00000000..64e557e0 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/pool_urbg.h @@ -0,0 +1,148 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_POOL_URBG_H_ +#define ABSL_RANDOM_INTERNAL_POOL_URBG_H_ + +#include +#include + +#include "absl/random/internal/traits.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // RandenPool is a thread-safe random number generator [random.req.urbg] that + // uses an underlying pool of Randen generators to generate values. Each thread + // has affinity to one instance of the underlying pool generators. Concurrent + // access is guarded by a spin-lock. 
+ template + class RandenPool + { + public: + using result_type = T; + static_assert(std::is_unsigned::value, "RandenPool template argument must be a built-in unsigned " + "integer type"); + + static constexpr result_type(min)() + { + return (std::numeric_limits::min)(); + } + + static constexpr result_type(max)() + { + return (std::numeric_limits::max)(); + } + + RandenPool() + { + } + + // Returns a single value. + inline result_type operator()() + { + return Generate(); + } + + // Fill data with random values. + static void Fill(absl::Span data); + + protected: + // Generate returns a single value. + static result_type Generate(); + }; + + extern template class RandenPool; + extern template class RandenPool; + extern template class RandenPool; + extern template class RandenPool; + + // PoolURBG uses an underlying pool of random generators to implement a + // thread-compatible [random.req.urbg] interface with an internal cache of + // values. + template + class PoolURBG + { + // Inheritance to access the protected static members of RandenPool. + using unsigned_type = typename make_unsigned_bits::type; + using PoolType = RandenPool; + using SpanType = absl::Span; + + static constexpr size_t kInitialBuffer = kBufferSize + 1; + static constexpr size_t kHalfBuffer = kBufferSize / 2; + + public: + using result_type = T; + + static_assert(std::is_unsigned::value, "PoolURBG must be parameterized by an unsigned integer type"); + + static_assert(kBufferSize > 1, "PoolURBG must be parameterized by a buffer-size > 1"); + + static_assert(kBufferSize <= 256, "PoolURBG must be parameterized by a buffer-size <= 256"); + + static constexpr result_type(min)() + { + return (std::numeric_limits::min)(); + } + + static constexpr result_type(max)() + { + return (std::numeric_limits::max)(); + } + + PoolURBG() : + next_(kInitialBuffer) + { + } + + // copy-constructor does not copy cache. 
+ PoolURBG(const PoolURBG&) : + next_(kInitialBuffer) + { + } + const PoolURBG& operator=(const PoolURBG&) + { + next_ = kInitialBuffer; + return *this; + } + + // move-constructor does move cache. + PoolURBG(PoolURBG&&) = default; + PoolURBG& operator=(PoolURBG&&) = default; + + inline result_type operator()() + { + if (next_ >= kBufferSize) + { + next_ = (kBufferSize > 2 && next_ > kBufferSize) ? kHalfBuffer : 0; + PoolType::Fill(SpanType(reinterpret_cast(state_ + next_), kBufferSize - next_)); + } + return state_[next_++]; + } + + private: + // Buffer size. + size_t next_; // index within state_ + result_type state_[kBufferSize]; + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_POOL_URBG_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/randen.h b/CAPI/cpp/grpc/include/absl/random/internal/randen.h new file mode 100644 index 00000000..01e4d424 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/randen.h @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_INTERNAL_RANDEN_H_ +#define ABSL_RANDOM_INTERNAL_RANDEN_H_ + +#include + +#include "absl/random/internal/platform.h" +#include "absl/random/internal/randen_hwaes.h" +#include "absl/random/internal/randen_slow.h" +#include "absl/random/internal/randen_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // RANDen = RANDom generator or beetroots in Swiss High German. + // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random + // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32. + // + // Randen implements the basic state manipulation methods. + class Randen + { + public: + static constexpr size_t kStateBytes = RandenTraits::kStateBytes; + static constexpr size_t kCapacityBytes = RandenTraits::kCapacityBytes; + static constexpr size_t kSeedBytes = RandenTraits::kSeedBytes; + + ~Randen() = default; + + Randen(); + + // Generate updates the randen sponge. The outer portion of the sponge + // (kCapacityBytes .. kStateBytes) may be consumed as PRNG state. + // REQUIRES: state points to kStateBytes of state. + inline void Generate(void* state) const + { +#if ABSL_RANDOM_INTERNAL_AES_DISPATCH + // HW AES Dispatch. + if (has_crypto_) + { + RandenHwAes::Generate(keys_, state); + } + else + { + RandenSlow::Generate(keys_, state); + } +#elif ABSL_HAVE_ACCELERATED_AES + // HW AES is enabled. + RandenHwAes::Generate(keys_, state); +#else + // HW AES is disabled. + RandenSlow::Generate(keys_, state); +#endif + } + + // Absorb incorporates additional seed material into the randen sponge. After + // absorb returns, Generate must be called before the state may be consumed. + // REQUIRES: seed points to kSeedBytes of seed. + // REQUIRES: state points to kStateBytes of state. + inline void Absorb(const void* seed, void* state) const + { +#if ABSL_RANDOM_INTERNAL_AES_DISPATCH + // HW AES Dispatch. 
+ if (has_crypto_) + { + RandenHwAes::Absorb(seed, state); + } + else + { + RandenSlow::Absorb(seed, state); + } +#elif ABSL_HAVE_ACCELERATED_AES + // HW AES is enabled. + RandenHwAes::Absorb(seed, state); +#else + // HW AES is disabled. + RandenSlow::Absorb(seed, state); +#endif + } + + private: + const void* keys_; +#if ABSL_RANDOM_INTERNAL_AES_DISPATCH + bool has_crypto_; +#endif + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_RANDEN_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/randen_detect.h b/CAPI/cpp/grpc/include/absl/random/internal/randen_detect.h new file mode 100644 index 00000000..a6a2f93b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/randen_detect.h @@ -0,0 +1,35 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_RANDEN_DETECT_H_ +#define ABSL_RANDOM_INTERNAL_RANDEN_DETECT_H_ + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // Returns whether the current CPU supports RandenHwAes implementation. + // This typically involves supporting cryptographic extensions on whichever + // platform is currently running. 
+ bool CPUSupportsRandenHwAes(); + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_RANDEN_DETECT_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/randen_engine.h b/CAPI/cpp/grpc/include/absl/random/internal/randen_engine.h new file mode 100644 index 00000000..a3e955af --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/randen_engine.h @@ -0,0 +1,298 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_RANDEN_ENGINE_H_ +#define ABSL_RANDOM_INTERNAL_RANDEN_ENGINE_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/endian.h" +#include "absl/meta/type_traits.h" +#include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/internal/randen.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // Deterministic pseudorandom byte generator with backtracking resistance + // (leaking the state does not compromise prior outputs). Based on Reverie + // (see "A Robust and Sponge-Like PRNG with Improved Efficiency") instantiated + // with an improved Simpira-like permutation. + // Returns values of type "T" (must be a built-in unsigned integer type). + // + // RANDen = RANDom generator or beetroots in Swiss High German. 
+ // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random + // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32. + template + class alignas(8) randen_engine + { + public: + // C++11 URBG interface: + using result_type = T; + static_assert(std::is_unsigned::value, "randen_engine template argument must be a built-in unsigned " + "integer type"); + + static constexpr result_type(min)() + { + return (std::numeric_limits::min)(); + } + + static constexpr result_type(max)() + { + return (std::numeric_limits::max)(); + } + + randen_engine() : + randen_engine(0) + { + } + explicit randen_engine(result_type seed_value) + { + seed(seed_value); + } + + template::value>> + explicit randen_engine(SeedSequence&& seq) + { + seed(seq); + } + + // alignment requirements dictate custom copy and move constructors. + randen_engine(const randen_engine& other) : + next_(other.next_), + impl_(other.impl_) + { + std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type)); + } + randen_engine& operator=(const randen_engine& other) + { + next_ = other.next_; + impl_ = other.impl_; + std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type)); + return *this; + } + + // Returns random bits from the buffer in units of result_type. + result_type operator()() + { + // Refill the buffer if needed (unlikely). + auto* begin = state(); + if (next_ >= kStateSizeT) + { + next_ = kCapacityT; + impl_.Generate(begin); + } + return little_endian::ToHost(begin[next_++]); + } + + template + typename absl::enable_if_t< + !std::is_convertible::value> + seed(SeedSequence&& seq) + { + // Zeroes the state. 
+ seed(); + reseed(seq); + } + + void seed(result_type seed_value = 0) + { + next_ = kStateSizeT; + // Zeroes the inner state and fills the outer state with seed_value to + // mimic the behaviour of reseed + auto* begin = state(); + std::fill(begin, begin + kCapacityT, 0); + std::fill(begin + kCapacityT, begin + kStateSizeT, seed_value); + } + + // Inserts entropy into (part of) the state. Calling this periodically with + // sufficient entropy ensures prediction resistance (attackers cannot predict + // future outputs even if state is compromised). + template + void reseed(SeedSequence& seq) + { + using sequence_result_type = typename SeedSequence::result_type; + static_assert(sizeof(sequence_result_type) == 4, "SeedSequence::result_type must be 32-bit"); + constexpr size_t kBufferSize = + Randen::kSeedBytes / sizeof(sequence_result_type); + alignas(16) sequence_result_type buffer[kBufferSize]; + + // Randen::Absorb XORs the seed into state, which is then mixed by a call + // to Randen::Generate. Seeding with only the provided entropy is preferred + // to using an arbitrary generate() call, so use [rand.req.seed_seq] + // size as a proxy for the number of entropy units that can be generated + // without relying on seed sequence mixing... + const size_t entropy_size = seq.size(); + if (entropy_size < kBufferSize) + { + // ... and only request that many values, or 256-bits, when unspecified. + const size_t requested_entropy = (entropy_size == 0) ? 8u : entropy_size; + std::fill(buffer + requested_entropy, buffer + kBufferSize, 0); + seq.generate(buffer, buffer + requested_entropy); +#ifdef ABSL_IS_BIG_ENDIAN + // Randen expects the seed buffer to be in Little Endian; reverse it on + // Big Endian platforms. + for (sequence_result_type& e : buffer) + { + e = absl::little_endian::FromHost(e); + } +#endif + // The Randen paper suggests preferentially initializing even-numbered + // 128-bit vectors of the randen state (there are 16 such vectors). 
+ // The seed data is merged into the state offset by 128-bits, which + // implies preferring seed bytes [16..31, ..., 208..223]. Since the + // buffer is 32-bit values, we swap the corresponding buffer positions in + // 128-bit chunks. + size_t dst = kBufferSize; + while (dst > 7) + { + // leave the odd bucket as-is. + dst -= 4; + size_t src = dst >> 1; + // swap 128-bits into the even bucket + std::swap(buffer[--dst], buffer[--src]); + std::swap(buffer[--dst], buffer[--src]); + std::swap(buffer[--dst], buffer[--src]); + std::swap(buffer[--dst], buffer[--src]); + } + } + else + { + seq.generate(buffer, buffer + kBufferSize); + } + impl_.Absorb(buffer, state()); + + // Generate will be called when operator() is called + next_ = kStateSizeT; + } + + void discard(uint64_t count) + { + uint64_t step = std::min(kStateSizeT - next_, count); + count -= step; + + constexpr uint64_t kRateT = kStateSizeT - kCapacityT; + auto* begin = state(); + while (count > 0) + { + next_ = kCapacityT; + impl_.Generate(*reinterpret_cast(begin)); + step = std::min(kRateT, count); + count -= step; + } + next_ += step; + } + + bool operator==(const randen_engine& other) const + { + const auto* begin = state(); + return next_ == other.next_ && + std::equal(begin, begin + kStateSizeT, other.state()); + } + + bool operator!=(const randen_engine& other) const + { + return !(*this == other); + } + + template + friend std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const randen_engine& engine + ) + { // NOLINT(runtime/references) + using numeric_type = + typename random_internal::stream_format_type::type; + auto saver = random_internal::make_ostream_state_saver(os); + auto* it = engine.state(); + for (auto* end = it + kStateSizeT; it < end; ++it) + { + // In the case that `elem` is `uint8_t`, it must be cast to something + // larger so that it prints as an integer rather than a character. For + // simplicity, apply the cast all circumstances. 
+ os << static_cast(little_endian::FromHost(*it)) + << os.fill(); + } + os << engine.next_; + return os; + } + + template + friend std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + randen_engine& engine + ) + { // NOLINT(runtime/references) + using numeric_type = + typename random_internal::stream_format_type::type; + result_type state[kStateSizeT]; + size_t next; + for (auto& elem : state) + { + // It is not possible to read uint8_t from wide streams, so it is + // necessary to read a wider type and then cast it to uint8_t. + numeric_type value; + is >> value; + elem = little_endian::ToHost(static_cast(value)); + } + is >> next; + if (is.fail()) + { + return is; + } + std::memcpy(engine.state(), state, sizeof(state)); + engine.next_ = next; + return is; + } + + private: + static constexpr size_t kStateSizeT = + Randen::kStateBytes / sizeof(result_type); + static constexpr size_t kCapacityT = + Randen::kCapacityBytes / sizeof(result_type); + + // Returns the state array pointer, which is aligned to 16 bytes. + // The first kCapacityT are the `inner' sponge; the remainder are available. + result_type* state() + { + return reinterpret_cast( + (reinterpret_cast(&raw_state_) & 0xf) ? (raw_state_ + 8) : raw_state_ + ); + } + const result_type* state() const + { + return const_cast(this)->state(); + } + + // raw state array, manually aligned in state(). This overallocates + // by 8 bytes since C++ does not guarantee extended heap alignment. 
+ alignas(8) char raw_state_[Randen::kStateBytes + 8]; + size_t next_; // index within state() + Randen impl_; + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_RANDEN_ENGINE_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/randen_hwaes.h b/CAPI/cpp/grpc/include/absl/random/internal/randen_hwaes.h new file mode 100644 index 00000000..6d0e1aec --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/randen_hwaes.h @@ -0,0 +1,53 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_RANDEN_HWAES_H_ +#define ABSL_RANDOM_INTERNAL_RANDEN_HWAES_H_ + +#include "absl/base/config.h" + +// HERMETIC NOTE: The randen_hwaes target must not introduce duplicate +// symbols from arbitrary system and other headers, since it may be built +// with different flags from other targets, using different levels of +// optimization, potentially introducing ODR violations. + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // RANDen = RANDom generator or beetroots in Swiss High German. + // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random + // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32. + // + // RandenHwAes implements the basic state manipulation methods. 
+ class RandenHwAes + { + public: + static void Generate(const void* keys, void* state_void); + static void Absorb(const void* seed_void, void* state_void); + static const void* GetKeys(); + }; + + // HasRandenHwAesImplementation returns true when there is an accelerated + // implementation, and false otherwise. If there is no implementation, + // then attempting to use it will abort the program. + bool HasRandenHwAesImplementation(); + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_RANDEN_HWAES_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/randen_slow.h b/CAPI/cpp/grpc/include/absl/random/internal/randen_slow.h new file mode 100644 index 00000000..25c3133e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/randen_slow.h @@ -0,0 +1,43 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_RANDEN_SLOW_H_ +#define ABSL_RANDOM_INTERNAL_RANDEN_SLOW_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // RANDen = RANDom generator or beetroots in Swiss High German. + // RandenSlow implements the basic state manipulation methods for + // architectures lacking AES hardware acceleration intrinsics. 
+ class RandenSlow + { + public: + static void Generate(const void* keys, void* state_void); + static void Absorb(const void* seed_void, void* state_void); + static const void* GetKeys(); + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_RANDEN_SLOW_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/randen_traits.h b/CAPI/cpp/grpc/include/absl/random/internal/randen_traits.h new file mode 100644 index 00000000..5217b888 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/randen_traits.h @@ -0,0 +1,91 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_RANDEN_TRAITS_H_ +#define ABSL_RANDOM_INTERNAL_RANDEN_TRAITS_H_ + +// HERMETIC NOTE: The randen_hwaes target must not introduce duplicate +// symbols from arbitrary system and other headers, since it may be built +// with different flags from other targets, using different levels of +// optimization, potentially introducing ODR violations. + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // RANDen = RANDom generator or beetroots in Swiss High German. + // 'Strong' (well-distributed, unpredictable, backtracking-resistant) random + // generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32. 
+ // + // High-level summary: + // 1) Reverie (see "A Robust and Sponge-Like PRNG with Improved Efficiency") is + // a sponge-like random generator that requires a cryptographic permutation. + // It improves upon "Provably Robust Sponge-Based PRNGs and KDFs" by + // achieving backtracking resistance with only one Permute() per buffer. + // + // 2) "Simpira v2: A Family of Efficient Permutations Using the AES Round + // Function" constructs up to 1024-bit permutations using an improved + // Generalized Feistel network with 2-round AES-128 functions. This Feistel + // block shuffle achieves diffusion faster and is less vulnerable to + // sliced-biclique attacks than the Type-2 cyclic shuffle. + // + // 3) "Improving the Generalized Feistel" and "New criterion for diffusion + // property" extends the same kind of improved Feistel block shuffle to 16 + // branches, which enables a 2048-bit permutation. + // + // Combine these three ideas and also change Simpira's subround keys from + // structured/low-entropy counters to digits of Pi (or other random source). + + // RandenTraits contains the basic algorithm traits, such as the size of the + // state, seed, sponge, etc. + struct RandenTraits + { + // Size of the entire sponge / state for the randen PRNG. + static constexpr size_t kStateBytes = 256; // 2048-bit + + // Size of the 'inner' (inaccessible) part of the sponge. Larger values would + // require more frequent calls to RandenGenerate. + static constexpr size_t kCapacityBytes = 16; // 128-bit + + // Size of the default seed consumed by the sponge. + static constexpr size_t kSeedBytes = kStateBytes - kCapacityBytes; + + // Assuming 128-bit blocks, the number of blocks in the state. + // Largest size for which security proofs are known. + static constexpr size_t kFeistelBlocks = 16; + + // Ensures SPRP security and two full subblock diffusions. + // Must be > 4 * log2(kFeistelBlocks). + static constexpr size_t kFeistelRounds = 16 + 1; + + // Size of the key. 
A 128-bit key block is used for every-other + // feistel block (Type-2 generalized Feistel network) in each round. + static constexpr size_t kKeyBytes = 16 * kFeistelRounds * kFeistelBlocks / 2; + }; + + // Randen key arrays. In randen_round_keys.cc + extern const unsigned char kRandenRoundKeys[RandenTraits::kKeyBytes]; + extern const unsigned char kRandenRoundKeysBE[RandenTraits::kKeyBytes]; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_RANDEN_TRAITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/salted_seed_seq.h b/CAPI/cpp/grpc/include/absl/random/internal/salted_seed_seq.h new file mode 100644 index 00000000..29467d5d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/salted_seed_seq.h @@ -0,0 +1,192 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_INTERNAL_SALTED_SEED_SEQ_H_ +#define ABSL_RANDOM_INTERNAL_SALTED_SEED_SEQ_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/inlined_vector.h" +#include "absl/meta/type_traits.h" +#include "absl/random/internal/seed_material.h" +#include "absl/types/optional.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // This class conforms to the C++ Standard "Seed Sequence" concept + // [rand.req.seedseq]. 
+ // + // A `SaltedSeedSeq` is meant to wrap an existing seed sequence and modify + // generated sequence by mixing with extra entropy. This entropy may be + // build-dependent or process-dependent. The implementation may change to be + // have either or both kinds of entropy. If salt is not available sequence is + // not modified. + template + class SaltedSeedSeq + { + public: + using inner_sequence_type = SSeq; + using result_type = typename SSeq::result_type; + + SaltedSeedSeq() : + seq_(absl::make_unique()) + { + } + + template + SaltedSeedSeq(Iterator begin, Iterator end) : + seq_(absl::make_unique(begin, end)) + { + } + + template + SaltedSeedSeq(std::initializer_list il) : + SaltedSeedSeq(il.begin(), il.end()) + { + } + + SaltedSeedSeq(const SaltedSeedSeq&) = delete; + SaltedSeedSeq& operator=(const SaltedSeedSeq&) = delete; + + SaltedSeedSeq(SaltedSeedSeq&&) = default; + SaltedSeedSeq& operator=(SaltedSeedSeq&&) = default; + + template + void generate(RandomAccessIterator begin, RandomAccessIterator end) + { + using U = typename std::iterator_traits::value_type; + + // The common case is that generate is called with ContiguousIterators + // to uint arrays. Such contiguous memory regions may be optimized, + // which we detect here. + using TagType = absl::conditional_t< + (std::is_same::value && + (std::is_pointer::value || + std::is_same::iterator>::value)), + ContiguousAndUint32Tag, + DefaultTag>; + if (begin != end) + { + generate_impl(TagType{}, begin, end, std::distance(begin, end)); + } + } + + template + void param(OutIterator out) const + { + seq_->param(out); + } + + size_t size() const + { + return seq_->size(); + } + + private: + struct ContiguousAndUint32Tag + { + }; + struct DefaultTag + { + }; + + // Generate which requires the iterators are contiguous pointers to uint32_t. + // Fills the initial seed buffer the underlying SSeq::generate() call, + // then mixes in the salt material. 
+ template + void generate_impl(ContiguousAndUint32Tag, Contiguous begin, Contiguous end, size_t n) + { + seq_->generate(begin, end); + const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0); + auto span = absl::Span(&*begin, n); + MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), span); + } + + // The uncommon case for generate is that it is called with iterators over + // some other buffer type which is assignable from a 32-bit value. In this + // case we allocate a temporary 32-bit buffer and then copy-assign back + // to the initial inputs. + template + void generate_impl(DefaultTag, RandomAccessIterator begin, RandomAccessIterator, size_t n) + { + // Allocates a seed buffer of `n` elements, generates the seed, then + // copies the result into the `out` iterator. + absl::InlinedVector data(n, 0); + generate_impl(ContiguousAndUint32Tag{}, data.begin(), data.end(), n); + std::copy(data.begin(), data.end(), begin); + } + + // Because [rand.req.seedseq] is not required to be copy-constructible, + // copy-assignable nor movable, we wrap it with unique pointer to be able + // to move SaltedSeedSeq. + std::unique_ptr seq_; + }; + + // is_salted_seed_seq indicates whether the type is a SaltedSeedSeq. + template + struct is_salted_seed_seq : public std::false_type + { + }; + + template + struct is_salted_seed_seq< + T, + typename std::enable_if>::value>::type> : public std::true_type + { + }; + + // MakeSaltedSeedSeq returns a salted variant of the seed sequence. + // When provided with an existing SaltedSeedSeq, returns the input parameter, + // otherwise constructs a new SaltedSeedSeq which embodies the original + // non-salted seed parameters. 
+ template< + typename SSeq, // + typename EnableIf = absl::enable_if_t::value>> + SSeq MakeSaltedSeedSeq(SSeq&& seq) + { + return SSeq(std::forward(seq)); + } + + template< + typename SSeq, // + typename EnableIf = absl::enable_if_t::value>> + SaltedSeedSeq::type> MakeSaltedSeedSeq(SSeq&& seq) + { + using sseq_type = typename std::decay::type; + using result_type = typename sseq_type::result_type; + + absl::InlinedVector data; + seq.param(std::back_inserter(data)); + return SaltedSeedSeq(data.begin(), data.end()); + } + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_SALTED_SEED_SEQ_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/seed_material.h b/CAPI/cpp/grpc/include/absl/random/internal/seed_material.h new file mode 100644 index 00000000..a1a39968 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/seed_material.h @@ -0,0 +1,109 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_INTERNAL_SEED_MATERIAL_H_ +#define ABSL_RANDOM_INTERNAL_SEED_MATERIAL_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/types/optional.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // Returns the number of 32-bit blocks needed to contain the given number of + // bits. + constexpr size_t SeedBitsToBlocks(size_t seed_size) + { + return (seed_size + 31) / 32; + } + + // Amount of entropy (measured in bits) used to instantiate a Seed Sequence, + // with which to create a URBG. + constexpr size_t kEntropyBitsNeeded = 256; + + // Amount of entropy (measured in 32-bit blocks) used to instantiate a Seed + // Sequence, with which to create a URBG. + constexpr size_t kEntropyBlocksNeeded = + random_internal::SeedBitsToBlocks(kEntropyBitsNeeded); + + static_assert(kEntropyBlocksNeeded > 0, "Entropy used to seed URBGs must be nonzero."); + + // Attempts to fill a span of uint32_t-values using an OS-provided source of + // true entropy (eg. /dev/urandom) into an array of uint32_t blocks of data. The + // resulting array may be used to initialize an instance of a class conforming + // to the C++ Standard "Seed Sequence" concept [rand.req.seedseq]. + // + // If values.data() == nullptr, the behavior is undefined. + ABSL_MUST_USE_RESULT + bool ReadSeedMaterialFromOSEntropy(absl::Span values); + + // Attempts to fill a span of uint32_t-values using variates generated by an + // existing instance of a class conforming to the C++ Standard "Uniform Random + // Bit Generator" concept [rand.req.urng]. The resulting data may be used to + // initialize an instance of a class conforming to the C++ Standard + // "Seed Sequence" concept [rand.req.seedseq]. + // + // If urbg == nullptr or values.data() == nullptr, the behavior is undefined. 
+ template + ABSL_MUST_USE_RESULT bool ReadSeedMaterialFromURBG( + URBG* urbg, absl::Span values + ) + { + random_internal::FastUniformBits distr; + + assert(urbg != nullptr && values.data() != nullptr); + if (urbg == nullptr || values.data() == nullptr) + { + return false; + } + + for (uint32_t& seed_value : values) + { + seed_value = distr(*urbg); + } + return true; + } + + // Mixes given sequence of values with into given sequence of seed material. + // Time complexity of this function is O(sequence.size() * + // seed_material.size()). + // + // Algorithm is based on code available at + // https://gist.github.com/imneme/540829265469e673d045 + // by Melissa O'Neill. + void MixIntoSeedMaterial(absl::Span sequence, absl::Span seed_material); + + // Returns salt value. + // + // Salt is obtained only once and stored in static variable. + // + // May return empty value if optaining the salt was not possible. + absl::optional GetSaltMaterial(); + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_SEED_MATERIAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/sequence_urbg.h b/CAPI/cpp/grpc/include/absl/random/internal/sequence_urbg.h new file mode 100644 index 00000000..6e8ffea7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/sequence_urbg.h @@ -0,0 +1,78 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_INTERNAL_SEQUENCE_URBG_H_ +#define ABSL_RANDOM_INTERNAL_SEQUENCE_URBG_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // `sequence_urbg` is a simple random number generator which meets the + // requirements of [rand.req.urbg], and is solely for testing absl + // distributions. + class sequence_urbg + { + public: + using result_type = uint64_t; + + static constexpr result_type(min)() + { + return (std::numeric_limits::min)(); + } + static constexpr result_type(max)() + { + return (std::numeric_limits::max)(); + } + + sequence_urbg(std::initializer_list data) : + i_(0), + data_(data) + { + } + void reset() + { + i_ = 0; + } + + result_type operator()() + { + return data_[i_++ % data_.size()]; + } + + size_t invocations() const + { + return i_; + } + + private: + size_t i_; + std::vector data_; + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_SEQUENCE_URBG_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/traits.h b/CAPI/cpp/grpc/include/absl/random/internal/traits.h new file mode 100644 index 00000000..cdcf4739 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/traits.h @@ -0,0 +1,181 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_INTERNAL_TRAITS_H_ +#define ABSL_RANDOM_INTERNAL_TRAITS_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/numeric/bits.h" +#include "absl/numeric/int128.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // random_internal::is_widening_convertible + // + // Returns whether a type A is widening-convertible to a type B. + // + // A is widening-convertible to B means: + // A a = ; + // B b = a; + // A c = b; + // EXPECT_EQ(a, c); + template + class is_widening_convertible + { + // As long as there are enough bits in the exact part of a number: + // - unsigned can fit in float, signed, unsigned + // - signed can fit in float, signed + // - float can fit in float + // So we define rank to be: + // - rank(float) -> 2 + // - rank(signed) -> 1 + // - rank(unsigned) -> 0 + template + static constexpr int rank() + { + return !std::numeric_limits::is_integer + + std::numeric_limits::is_signed; + } + + public: + // If an arithmetic-type B can represent at least as many digits as a type A, + // and B belongs to a rank no lower than A, then A can be safely represented + // by B through a widening-conversion. + static constexpr bool value = + std::numeric_limits::digits <= std::numeric_limits::digits && + rank() <= rank(); + }; + + template + struct IsIntegral : std::is_integral + { + }; + template<> + struct IsIntegral : std::true_type + { + }; + template<> + struct IsIntegral : std::true_type + { + }; + + template + struct MakeUnsigned : std::make_unsigned + { + }; + template<> + struct MakeUnsigned + { + using type = absl::uint128; + }; + template<> + struct MakeUnsigned + { + using type = absl::uint128; + }; + + template + struct IsUnsigned : std::is_unsigned + { + }; + template<> + struct IsUnsigned : std::false_type + { + }; + template<> + struct IsUnsigned : std::true_type + { + }; + + // unsigned_bits::type returns the unsigned int type with the indicated + // number of bits. 
+ template + struct unsigned_bits; + + template<> + struct unsigned_bits<8> + { + using type = uint8_t; + }; + template<> + struct unsigned_bits<16> + { + using type = uint16_t; + }; + template<> + struct unsigned_bits<32> + { + using type = uint32_t; + }; + template<> + struct unsigned_bits<64> + { + using type = uint64_t; + }; + + template<> + struct unsigned_bits<128> + { + using type = absl::uint128; + }; + + // 256-bit wrapper for wide multiplications. + struct U256 + { + uint128 hi; + uint128 lo; + }; + template<> + struct unsigned_bits<256> + { + using type = U256; + }; + + template + struct make_unsigned_bits + { + using type = typename unsigned_bits< + std::numeric_limits::type>::digits>::type; + }; + + template + int BitWidth(T v) + { + // Workaround for bit_width not supporting int128. + // Don't hardcode `64` to make sure this code does not trigger compiler + // warnings in smaller types. + constexpr int half_bits = sizeof(T) * 8 / 2; + if (sizeof(T) == 16 && (v >> half_bits) != 0) + { + return bit_width(static_cast(v >> half_bits)) + half_bits; + } + else + { + return bit_width(static_cast(v)); + } + } + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_TRAITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/uniform_helper.h b/CAPI/cpp/grpc/include/absl/random/internal/uniform_helper.h new file mode 100644 index 00000000..2140f04a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/uniform_helper.h @@ -0,0 +1,260 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_ +#define ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/meta/type_traits.h" +#include "absl/random/internal/traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + template + class uniform_int_distribution; + + template + class uniform_real_distribution; + + // Interval tag types which specify whether the interval is open or closed + // on either boundary. + + namespace random_internal + { + template + struct TagTypeCompare + { + }; + + template + constexpr bool operator==(TagTypeCompare, TagTypeCompare) + { + // Tags are mono-states. They always compare equal. + return true; + } + template + constexpr bool operator!=(TagTypeCompare, TagTypeCompare) + { + return false; + } + + } // namespace random_internal + + struct IntervalClosedClosedTag : public random_internal::TagTypeCompare + { + }; + struct IntervalClosedOpenTag : public random_internal::TagTypeCompare + { + }; + struct IntervalOpenClosedTag : public random_internal::TagTypeCompare + { + }; + struct IntervalOpenOpenTag : public random_internal::TagTypeCompare + { + }; + + namespace random_internal + { + + // In the absence of an explicitly provided return-type, the template + // "uniform_inferred_return_t" is used to derive a suitable type, based on + // the data-types of the endpoint-arguments {A lo, B hi}. 
+ // + // Given endpoints {A lo, B hi}, one of {A, B} will be chosen as the + // return-type, if one type can be implicitly converted into the other, in a + // lossless way. The template "is_widening_convertible" implements the + // compile-time logic for deciding if such a conversion is possible. + // + // If no such conversion between {A, B} exists, then the overload for + // absl::Uniform() will be discarded, and the call will be ill-formed. + // Return-type for absl::Uniform() when the return-type is inferred. + template + using uniform_inferred_return_t = + absl::enable_if_t, is_widening_convertible>::value, typename std::conditional::value, B, A>::type>; + + // The functions + // uniform_lower_bound(tag, a, b) + // and + // uniform_upper_bound(tag, a, b) + // are used as implementation-details for absl::Uniform(). + // + // Conceptually, + // [a, b] == [uniform_lower_bound(IntervalClosedClosed, a, b), + // uniform_upper_bound(IntervalClosedClosed, a, b)] + // (a, b) == [uniform_lower_bound(IntervalOpenOpen, a, b), + // uniform_upper_bound(IntervalOpenOpen, a, b)] + // [a, b) == [uniform_lower_bound(IntervalClosedOpen, a, b), + // uniform_upper_bound(IntervalClosedOpen, a, b)] + // (a, b] == [uniform_lower_bound(IntervalOpenClosed, a, b), + // uniform_upper_bound(IntervalOpenClosed, a, b)] + // + template + typename absl::enable_if_t< + absl::conjunction< + IsIntegral, + absl::disjunction, std::is_same>>::value, + IntType> + uniform_lower_bound(Tag, IntType a, IntType) + { + return a < (std::numeric_limits::max)() ? 
(a + 1) : a; + } + + template + typename absl::enable_if_t< + absl::conjunction< + std::is_floating_point, + absl::disjunction, std::is_same>>::value, + FloatType> + uniform_lower_bound(Tag, FloatType a, FloatType b) + { + return std::nextafter(a, b); + } + + template + typename absl::enable_if_t< + absl::disjunction, std::is_same>::value, + NumType> + uniform_lower_bound(Tag, NumType a, NumType) + { + return a; + } + + template + typename absl::enable_if_t< + absl::conjunction< + IsIntegral, + absl::disjunction, std::is_same>>::value, + IntType> + uniform_upper_bound(Tag, IntType, IntType b) + { + return b > (std::numeric_limits::min)() ? (b - 1) : b; + } + + template + typename absl::enable_if_t< + absl::conjunction< + std::is_floating_point, + absl::disjunction, std::is_same>>::value, + FloatType> + uniform_upper_bound(Tag, FloatType, FloatType b) + { + return b; + } + + template + typename absl::enable_if_t< + absl::conjunction< + IsIntegral, + absl::disjunction, std::is_same>>::value, + IntType> + uniform_upper_bound(Tag, IntType, IntType b) + { + return b; + } + + template + typename absl::enable_if_t< + absl::conjunction< + std::is_floating_point, + absl::disjunction, std::is_same>>::value, + FloatType> + uniform_upper_bound(Tag, FloatType, FloatType b) + { + return std::nextafter(b, (std::numeric_limits::max)()); + } + + // Returns whether the bounds are valid for the underlying distribution. + // Inputs must have already been resolved via uniform_*_bound calls. + // + // The c++ standard constraints in [rand.dist.uni.int] are listed as: + // requires: lo <= hi. + // + // In the uniform_int_distrubtion, {lo, hi} are closed, closed. Thus: + // [0, 0] is legal. + // [0, 0) is not legal, but [0, 1) is, which translates to [0, 0]. + // (0, 1) is not legal, but (0, 2) is, which translates to [1, 1]. + // (0, 0] is not legal, but (0, 1] is, which translates to [1, 1]. 
+ // + // The c++ standard constraints in [rand.dist.uni.real] are listed as: + // requires: lo <= hi. + // requires: (hi - lo) <= numeric_limits::max() + // + // In the uniform_real_distribution, {lo, hi} are closed, open, Thus: + // [0, 0] is legal, which is [0, 0+epsilon). + // [0, 0) is legal. + // (0, 0) is not legal, but (0-epsilon, 0+epsilon) is. + // (0, 0] is not legal, but (0, 0+epsilon] is. + // + template + absl::enable_if_t::value, bool> + is_uniform_range_valid(FloatType a, FloatType b) + { + return a <= b && std::isfinite(b - a); + } + + template + absl::enable_if_t::value, bool> + is_uniform_range_valid(IntType a, IntType b) + { + return a <= b; + } + + // UniformDistribution selects either absl::uniform_int_distribution + // or absl::uniform_real_distribution depending on the NumType parameter. + template + using UniformDistribution = + typename std::conditional::value, absl::uniform_int_distribution, absl::uniform_real_distribution>::type; + + // UniformDistributionWrapper is used as the underlying distribution type + // by the absl::Uniform template function. It selects the proper Abseil + // uniform distribution and provides constructor overloads that match the + // expected parameter order as well as adjusting distribution bounds based + // on the tag. 
+ template + struct UniformDistributionWrapper : public UniformDistribution + { + template + explicit UniformDistributionWrapper(TagType, NumType lo, NumType hi) : + UniformDistribution( + uniform_lower_bound(TagType{}, lo, hi), + uniform_upper_bound(TagType{}, lo, hi) + ) + { + } + + explicit UniformDistributionWrapper(NumType lo, NumType hi) : + UniformDistribution( + uniform_lower_bound(IntervalClosedOpenTag(), lo, hi), + uniform_upper_bound(IntervalClosedOpenTag(), lo, hi) + ) + { + } + + explicit UniformDistributionWrapper() : + UniformDistribution(std::numeric_limits::lowest(), (std::numeric_limits::max)()) + { + } + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/internal/wide_multiply.h b/CAPI/cpp/grpc/include/absl/random/internal/wide_multiply.h new file mode 100644 index 00000000..21a09c57 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/internal/wide_multiply.h @@ -0,0 +1,108 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_INTERNAL_WIDE_MULTIPLY_H_ +#define ABSL_RANDOM_INTERNAL_WIDE_MULTIPLY_H_ + +#include +#include +#include + +#if (defined(_WIN32) || defined(_WIN64)) && defined(_M_IA64) +#include // NOLINT(build/include_order) +#pragma intrinsic(_umul128) +#define ABSL_INTERNAL_USE_UMUL128 1 +#endif + +#include "absl/base/config.h" +#include "absl/numeric/bits.h" +#include "absl/numeric/int128.h" +#include "absl/random/internal/traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace random_internal + { + + // wide_multiply multiplies two N-bit values to a 2N-bit result. + template + struct wide_multiply + { + static constexpr size_t kN = std::numeric_limits::digits; + using input_type = UIntType; + using result_type = typename random_internal::unsigned_bits::type; + + static result_type multiply(input_type a, input_type b) + { + return static_cast(a) * b; + } + + static input_type hi(result_type r) + { + return static_cast(r >> kN); + } + static input_type lo(result_type r) + { + return static_cast(r); + } + + static_assert(std::is_unsigned::value, "Class-template wide_multiply<> argument must be unsigned."); + }; + + // MultiplyU128ToU256 multiplies two 128-bit values to a 256-bit value. 
+ inline U256 MultiplyU128ToU256(uint128 a, uint128 b) + { + const uint128 a00 = static_cast(a); + const uint128 a64 = a >> 64; + const uint128 b00 = static_cast(b); + const uint128 b64 = b >> 64; + + const uint128 c00 = a00 * b00; + const uint128 c64a = a00 * b64; + const uint128 c64b = a64 * b00; + const uint128 c128 = a64 * b64; + + const uint64_t carry = + static_cast(((c00 >> 64) + static_cast(c64a) + static_cast(c64b)) >> 64); + + return {c128 + (c64a >> 64) + (c64b >> 64) + carry, c00 + (c64a << 64) + (c64b << 64)}; + } + + template<> + struct wide_multiply + { + using input_type = uint128; + using result_type = U256; + + static result_type multiply(input_type a, input_type b) + { + return MultiplyU128ToU256(a, b); + } + + static input_type hi(result_type r) + { + return r.hi; + } + static input_type lo(result_type r) + { + return r.lo; + } + }; + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_INTERNAL_WIDE_MULTIPLY_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/log_uniform_int_distribution.h b/CAPI/cpp/grpc/include/absl/random/log_uniform_int_distribution.h new file mode 100644 index 00000000..ab11684f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/log_uniform_int_distribution.h @@ -0,0 +1,310 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_LOG_UNIFORM_INT_DISTRIBUTION_H_ +#define ABSL_RANDOM_LOG_UNIFORM_INT_DISTRIBUTION_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/numeric/bits.h" +#include "absl/random/internal/fastmath.h" +#include "absl/random/internal/generate_real.h" +#include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/internal/traits.h" +#include "absl/random/uniform_int_distribution.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // log_uniform_int_distribution: + // + // Returns a random variate R in range [min, max] such that + // floor(log(R-min, base)) is uniformly distributed. + // We ensure uniformity by discretization using the + // boundary sets [0, 1, base, base * base, ... min(base*n, max)] + // + template + class log_uniform_int_distribution + { + private: + using unsigned_type = + typename random_internal::make_unsigned_bits::type; + + public: + using result_type = IntType; + + class param_type + { + public: + using distribution_type = log_uniform_int_distribution; + + explicit param_type( + result_type min = 0, + result_type max = (std::numeric_limits::max)(), + result_type base = 2 + ) : + min_(min), + max_(max), + base_(base), + range_(static_cast(max_) - static_cast(min_)), + log_range_(0) + { + assert(max_ >= min_); + assert(base_ > 1); + + if (base_ == 2) + { + // Determine where the first set bit is on range(), giving a log2(range) + // value which can be used to construct bounds. + log_range_ = (std::min)(random_internal::BitWidth(range()), std::numeric_limits::digits); + } + else + { + // NOTE: Computing the logN(x) introduces error from 2 sources: + // 1. Conversion of int to double loses precision for values >= + // 2^53, which may cause some log() computations to operate on + // different values. + // 2. The error introduced by the division will cause the result + // to differ from the expected value. 
+ // + // Thus a result which should equal K may equal K +/- epsilon, + // which can eliminate some values depending on where the bounds fall. + const double inv_log_base = 1.0 / std::log(static_cast(base_)); + const double log_range = std::log(static_cast(range()) + 0.5); + log_range_ = static_cast(std::ceil(inv_log_base * log_range)); + } + } + + result_type(min)() const + { + return min_; + } + result_type(max)() const + { + return max_; + } + result_type base() const + { + return base_; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.min_ == b.min_ && a.max_ == b.max_ && a.base_ == b.base_; + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class log_uniform_int_distribution; + + int log_range() const + { + return log_range_; + } + unsigned_type range() const + { + return range_; + } + + result_type min_; + result_type max_; + result_type base_; + unsigned_type range_; // max - min + int log_range_; // ceil(logN(range_)) + + static_assert(random_internal::IsIntegral::value, "Class-template absl::log_uniform_int_distribution<> must be " + "parameterized using an integral type."); + }; + + log_uniform_int_distribution() : + log_uniform_int_distribution(0) + { + } + + explicit log_uniform_int_distribution( + result_type min, + result_type max = (std::numeric_limits::max)(), + result_type base = 2 + ) : + param_(min, max, base) + { + } + + explicit log_uniform_int_distribution(const param_type& p) : + param_(p) + { + } + + void reset() + { + } + + // generating functions + template + result_type operator()(URBG& g) + { // NOLINT(runtime/references) + return (*this)(g, param_); + } + + template + result_type operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p) + { + return static_cast((p.min)() + Generate(g, p)); + } + + result_type(min)() const + { + return (param_.min)(); + } + result_type(max)() const + { + return (param_.max)(); + } 
+ result_type base() const + { + return param_.base(); + } + + param_type param() const + { + return param_; + } + void param(const param_type& p) + { + param_ = p; + } + + friend bool operator==(const log_uniform_int_distribution& a, const log_uniform_int_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const log_uniform_int_distribution& a, const log_uniform_int_distribution& b) + { + return a.param_ != b.param_; + } + + private: + // Returns a log-uniform variate in the range [0, p.range()]. The caller + // should add min() to shift the result to the correct range. + template + unsigned_type Generate(URNG& g, // NOLINT(runtime/references) + const param_type& p); + + param_type param_; + }; + + template + template + typename log_uniform_int_distribution::unsigned_type + log_uniform_int_distribution::Generate( + URBG& g, // NOLINT(runtime/references) + const param_type& p + ) + { + // sample e over [0, log_range]. Map the results of e to this: + // 0 => 0 + // 1 => [1, b-1] + // 2 => [b, (b^2)-1] + // n => [b^(n-1)..(b^n)-1] + const int e = absl::uniform_int_distribution(0, p.log_range())(g); + if (e == 0) + { + return 0; + } + const int d = e - 1; + + unsigned_type base_e, top_e; + if (p.base() == 2) + { + base_e = static_cast(1) << d; + + top_e = (e >= std::numeric_limits::digits) ? (std::numeric_limits::max)() : (static_cast(1) << e) - 1; + } + else + { + const double r = std::pow(static_cast(p.base()), d); + const double s = (r * static_cast(p.base())) - 1.0; + + base_e = + (r > static_cast((std::numeric_limits::max)())) ? (std::numeric_limits::max)() : static_cast(r); + + top_e = + (s > static_cast((std::numeric_limits::max)())) ? (std::numeric_limits::max)() : static_cast(s); + } + + const unsigned_type lo = (base_e >= p.range()) ? p.range() : base_e; + const unsigned_type hi = (top_e >= p.range()) ? 
p.range() : top_e; + + // choose uniformly over [lo, hi] + return absl::uniform_int_distribution( + static_cast(lo), static_cast(hi) + )(g); + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const log_uniform_int_distribution& x + ) + { + using stream_type = + typename random_internal::stream_format_type::type; + auto saver = random_internal::make_ostream_state_saver(os); + os << static_cast((x.min)()) << os.fill() + << static_cast((x.max)()) << os.fill() + << static_cast(x.base()); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + log_uniform_int_distribution& x + ) + { // NOLINT(runtime/references) + using param_type = typename log_uniform_int_distribution::param_type; + using result_type = + typename log_uniform_int_distribution::result_type; + using stream_type = + typename random_internal::stream_format_type::type; + + stream_type min; + stream_type max; + stream_type base; + + auto saver = random_internal::make_istream_state_saver(is); + is >> min >> max >> base; + if (!is.fail()) + { + x.param(param_type(static_cast(min), static_cast(max), static_cast(base))); + } + return is; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_LOG_UNIFORM_INT_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/mock_distributions.h b/CAPI/cpp/grpc/include/absl/random/mock_distributions.h new file mode 100644 index 00000000..337c84b3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/mock_distributions.h @@ -0,0 +1,259 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: mock_distributions.h +// ----------------------------------------------------------------------------- +// +// This file contains mock distribution functions for use alongside an +// `absl::MockingBitGen` object within the Googletest testing framework. Such +// mocks are useful to provide deterministic values as return values within +// (otherwise random) Abseil distribution functions. +// +// The return type of each function is a mock expectation object which +// is used to set the match result. +// +// More information about the Googletest testing framework is available at +// https://github.com/google/googletest +// +// EXPECT_CALL and ON_CALL need to be made within the same DLL component as +// the call to absl::Uniform and related methods, otherwise mocking will fail +// since the underlying implementation creates a type-specific pointer which +// will be distinct across different DLL boundaries. 
+// +// Example: +// +// absl::MockingBitGen mock; +// EXPECT_CALL(absl::MockUniform(), Call(mock, 1, 1000)) +// .WillRepeatedly(testing::ReturnRoundRobin({20, 40})); +// +// EXPECT_EQ(absl::Uniform(gen, 1, 1000), 20); +// EXPECT_EQ(absl::Uniform(gen, 1, 1000), 40); +// EXPECT_EQ(absl::Uniform(gen, 1, 1000), 20); +// EXPECT_EQ(absl::Uniform(gen, 1, 1000), 40); + +#ifndef ABSL_RANDOM_MOCK_DISTRIBUTIONS_H_ +#define ABSL_RANDOM_MOCK_DISTRIBUTIONS_H_ + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" +#include "absl/random/distributions.h" +#include "absl/random/internal/mock_overload_set.h" +#include "absl/random/mocking_bit_gen.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // absl::MockUniform + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::Uniform. + // + // `absl::MockUniform` is a class template used in conjunction with Googletest's + // `ON_CALL()` and `EXPECT_CALL()` macros. To use it, default-construct an + // instance of it inside `ON_CALL()` or `EXPECT_CALL()`, and use `Call(...)` the + // same way one would define mocks on a Googletest `MockFunction()`. 
+ // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockUniform(), Call(mock)) + // .WillOnce(Return(123456)); + // auto x = absl::Uniform(mock); + // assert(x == 123456) + // + template + using MockUniform = random_internal::MockOverloadSet< + random_internal::UniformDistributionWrapper, + R(IntervalClosedOpenTag, MockingBitGen&, R, R), + R(IntervalClosedClosedTag, MockingBitGen&, R, R), + R(IntervalOpenOpenTag, MockingBitGen&, R, R), + R(IntervalOpenClosedTag, MockingBitGen&, R, R), + R(MockingBitGen&, R, R), + R(MockingBitGen&)>; + + // ----------------------------------------------------------------------------- + // absl::MockBernoulli + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::Bernoulli. + // + // `absl::MockBernoulli` is a class used in conjunction with Googletest's + // `ON_CALL()` and `EXPECT_CALL()` macros. To use it, default-construct an + // instance of it inside `ON_CALL()` or `EXPECT_CALL()`, and use `Call(...)` the + // same way one would define mocks on a Googletest `MockFunction()`. + // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockBernoulli(), Call(mock, testing::_)) + // .WillOnce(Return(false)); + // assert(absl::Bernoulli(mock, 0.5) == false); + // + using MockBernoulli = + random_internal::MockOverloadSet; + + // ----------------------------------------------------------------------------- + // absl::MockBeta + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::Beta. + // + // `absl::MockBeta` is a class used in conjunction with Googletest's `ON_CALL()` + // and `EXPECT_CALL()` macros. To use it, default-construct an instance of it + // inside `ON_CALL()` or `EXPECT_CALL()`, and use `Call(...)` the same way one + // would define mocks on a Googletest `MockFunction()`. 
+ // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockBeta(), Call(mock, 3.0, 2.0)) + // .WillOnce(Return(0.567)); + // auto x = absl::Beta(mock, 3.0, 2.0); + // assert(x == 0.567); + // + template + using MockBeta = + random_internal::MockOverloadSet, RealType(MockingBitGen&, RealType, RealType)>; + + // ----------------------------------------------------------------------------- + // absl::MockExponential + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::Exponential. + // + // `absl::MockExponential` is a class template used in conjunction with + // Googletest's `ON_CALL()` and `EXPECT_CALL()` macros. To use it, + // default-construct an instance of it inside `ON_CALL()` or `EXPECT_CALL()`, + // and use `Call(...)` the same way one would define mocks on a + // Googletest `MockFunction()`. + // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockExponential(), Call(mock, 0.5)) + // .WillOnce(Return(12.3456789)); + // auto x = absl::Exponential(mock, 0.5); + // assert(x == 12.3456789) + // + template + using MockExponential = + random_internal::MockOverloadSet, RealType(MockingBitGen&, RealType)>; + + // ----------------------------------------------------------------------------- + // absl::MockGaussian + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::Gaussian. + // + // `absl::MockGaussian` is a class template used in conjunction with + // Googletest's `ON_CALL()` and `EXPECT_CALL()` macros. To use it, + // default-construct an instance of it inside `ON_CALL()` or `EXPECT_CALL()`, + // and use `Call(...)` the same way one would define mocks on a + // Googletest `MockFunction()`. 
+ // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockGaussian(), Call(mock, 16.3, 3.3)) + // .WillOnce(Return(12.3456789)); + // auto x = absl::Gaussian(mock, 16.3, 3.3); + // assert(x == 12.3456789) + // + template + using MockGaussian = + random_internal::MockOverloadSet, RealType(MockingBitGen&, RealType, RealType)>; + + // ----------------------------------------------------------------------------- + // absl::MockLogUniform + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::LogUniform. + // + // `absl::MockLogUniform` is a class template used in conjunction with + // Googletest's `ON_CALL()` and `EXPECT_CALL()` macros. To use it, + // default-construct an instance of it inside `ON_CALL()` or `EXPECT_CALL()`, + // and use `Call(...)` the same way one would define mocks on a + // Googletest `MockFunction()`. + // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockLogUniform(), Call(mock, 10, 10000, 10)) + // .WillOnce(Return(1221)); + // auto x = absl::LogUniform(mock, 10, 10000, 10); + // assert(x == 1221) + // + template + using MockLogUniform = random_internal::MockOverloadSet< + absl::log_uniform_int_distribution, + IntType(MockingBitGen&, IntType, IntType, IntType)>; + + // ----------------------------------------------------------------------------- + // absl::MockPoisson + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::Poisson. + // + // `absl::MockPoisson` is a class template used in conjunction with Googletest's + // `ON_CALL()` and `EXPECT_CALL()` macros. To use it, default-construct an + // instance of it inside `ON_CALL()` or `EXPECT_CALL()`, and use `Call(...)` the + // same way one would define mocks on a Googletest `MockFunction()`. 
+ // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockPoisson(), Call(mock, 2.0)) + // .WillOnce(Return(1221)); + // auto x = absl::Poisson(mock, 2.0); + // assert(x == 1221) + // + template + using MockPoisson = + random_internal::MockOverloadSet, IntType(MockingBitGen&, double)>; + + // ----------------------------------------------------------------------------- + // absl::MockZipf + // ----------------------------------------------------------------------------- + // + // Matches calls to absl::Zipf. + // + // `absl::MockZipf` is a class template used in conjunction with Googletest's + // `ON_CALL()` and `EXPECT_CALL()` macros. To use it, default-construct an + // instance of it inside `ON_CALL()` or `EXPECT_CALL()`, and use `Call(...)` the + // same way one would define mocks on a Googletest `MockFunction()`. + // + // Example: + // + // absl::MockingBitGen mock; + // EXPECT_CALL(absl::MockZipf(), Call(mock, 1000000, 2.0, 1.0)) + // .WillOnce(Return(1221)); + // auto x = absl::Zipf(mock, 1000000, 2.0, 1.0); + // assert(x == 1221) + // + template + using MockZipf = + random_internal::MockOverloadSet, IntType(MockingBitGen&, IntType, double, double)>; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_MOCK_DISTRIBUTIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/mocking_bit_gen.h b/CAPI/cpp/grpc/include/absl/random/mocking_bit_gen.h new file mode 100644 index 00000000..ce3513af --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/mocking_bit_gen.h @@ -0,0 +1,255 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// mocking_bit_gen.h +// ----------------------------------------------------------------------------- +// +// This file includes an `absl::MockingBitGen` class to use as a mock within the +// Googletest testing framework. Such a mock is useful to provide deterministic +// values as return values within (otherwise random) Abseil distribution +// functions. Such determinism within a mock is useful within testing frameworks +// to test otherwise indeterminate APIs. +// +// More information about the Googletest testing framework is available at +// https://github.com/google/googletest + +#ifndef ABSL_RANDOM_MOCKING_BIT_GEN_H_ +#define ABSL_RANDOM_MOCKING_BIT_GEN_H_ + +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/fast_type_id.h" +#include "absl/container/flat_hash_map.h" +#include "absl/meta/type_traits.h" +#include "absl/random/distributions.h" +#include "absl/random/internal/distribution_caller.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_join.h" +#include "absl/types/span.h" +#include "absl/types/variant.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace random_internal + { + template + struct DistributionCaller; + class MockHelpers; + + } // namespace random_internal + class BitGenRef; + + // MockingBitGen + // + // `absl::MockingBitGen` is a mock Uniform Random Bit 
Generator (URBG) class + // which can act in place of an `absl::BitGen` URBG within tests using the + // Googletest testing framework. + // + // Usage: + // + // Use an `absl::MockingBitGen` along with a mock distribution object (within + // mock_distributions.h) inside Googletest constructs such as ON_CALL(), + // EXPECT_TRUE(), etc. to produce deterministic results conforming to the + // distribution's API contract. + // + // Example: + // + // // Mock a call to an `absl::Bernoulli` distribution using Googletest + // absl::MockingBitGen bitgen; + // + // ON_CALL(absl::MockBernoulli(), Call(bitgen, 0.5)) + // .WillByDefault(testing::Return(true)); + // EXPECT_TRUE(absl::Bernoulli(bitgen, 0.5)); + // + // // Mock a call to an `absl::Uniform` distribution within Googletest + // absl::MockingBitGen bitgen; + // + // ON_CALL(absl::MockUniform(), Call(bitgen, testing::_, testing::_)) + // .WillByDefault([] (int low, int high) { + // return low + (high - low) / 2; + // }); + // + // EXPECT_EQ(absl::Uniform(gen, 0, 10), 5); + // EXPECT_EQ(absl::Uniform(gen, 30, 40), 35); + // + // At this time, only mock distributions supplied within the Abseil random + // library are officially supported. + // + // EXPECT_CALL and ON_CALL need to be made within the same DLL component as + // the call to absl::Uniform and related methods, otherwise mocking will fail + // since the underlying implementation creates a type-specific pointer which + // will be distinct across different DLL boundaries. + // + class MockingBitGen + { + public: + MockingBitGen() = default; + ~MockingBitGen() = default; + + // URBG interface + using result_type = absl::BitGen::result_type; + + static constexpr result_type(min)() + { + return (absl::BitGen::min)(); + } + static constexpr result_type(max)() + { + return (absl::BitGen::max)(); + } + result_type operator()() + { + return gen_(); + } + + private: + // GetMockFnType returns the testing::MockFunction for a result and tuple. 
+ // This method only exists for type deduction and is otherwise unimplemented. + template + static auto GetMockFnType(ResultT, std::tuple) + -> ::testing::MockFunction; + + // MockFnCaller is a helper method for use with absl::apply to + // apply an ArgTupleT to a compatible MockFunction. + // NOTE: MockFnCaller is essentially equivalent to the lambda: + // [fn](auto... args) { return fn->Call(std::move(args)...)} + // however that fails to build on some supported platforms. + template + struct MockFnCaller; + + // specialization for std::tuple. + template + struct MockFnCaller> + { + MockFnType* fn; + inline ResultT operator()(Args... args) + { + return fn->Call(std::move(args)...); + } + }; + + // FunctionHolder owns a particular ::testing::MockFunction associated with + // a mocked type signature, and implement the type-erased Apply call, which + // applies type-erased arguments to the mock. + class FunctionHolder + { + public: + virtual ~FunctionHolder() = default; + + // Call is a dispatch function which converts the + // generic type-erased parameters into a specific mock invocation call. + virtual void Apply(/*ArgTupleT*/ void* args_tuple, + /*ResultT*/ void* result) = 0; + }; + + template + class FunctionHolderImpl final : public FunctionHolder + { + public: + void Apply(void* args_tuple, void* result) override + { + // Requires tuple_args to point to a ArgTupleT, which is a + // std::tuple used to invoke the mock function. Requires result + // to point to a ResultT, which is the result of the call. + *static_cast(result) = + absl::apply(MockFnCaller{&mock_fn_}, *static_cast(args_tuple)); + } + + MockFnType mock_fn_; + }; + + // MockingBitGen::RegisterMock + // + // RegisterMock(FastTypeIdType) is the main extension + // point for extending the MockingBitGen framework. It provides a mechanism to + // install a mock expectation for a function like ResultT(Args...) keyed by + // type_idex onto the MockingBitGen context. 
The key is that the type_index + // used to register must match the type index used to call the mock. + // + // The returned MockFunction<...> type can be used to setup additional + // distribution parameters of the expectation. + template + auto RegisterMock(SelfT&, base_internal::FastTypeIdType type) + -> decltype(GetMockFnType(std::declval(), std::declval()))& + { + using MockFnType = decltype(GetMockFnType(std::declval(), std::declval())); + + using WrappedFnType = absl::conditional_t< + std::is_same>::value, + ::testing::NiceMock, + absl::conditional_t< + std::is_same>::value, + ::testing::NaggyMock, + absl::conditional_t< + std::is_same>::value, + ::testing::StrictMock, + MockFnType>>>; + + using ImplT = FunctionHolderImpl; + auto& mock = mocks_[type]; + if (!mock) + { + mock = absl::make_unique(); + } + return static_cast(mock.get())->mock_fn_; + } + + // MockingBitGen::InvokeMock + // + // InvokeMock(FastTypeIdType, args, result) is the entrypoint for invoking + // mocks registered on MockingBitGen. + // + // When no mocks are registered on the provided FastTypeIdType, returns false. + // Otherwise attempts to invoke the mock function ResultT(Args...) that + // was previously registered via the type_index. + // Requires tuple_args to point to a ArgTupleT, which is a std::tuple + // used to invoke the mock function. + // Requires result to point to a ResultT, which is the result of the call. + inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple, void* result) + { + // Trigger a mock, if there exists one that matches `param`. 
+ auto it = mocks_.find(type); + if (it == mocks_.end()) + return false; + it->second->Apply(args_tuple, result); + return true; + } + + absl::flat_hash_map> + mocks_; + absl::BitGen gen_; + + template + friend struct ::absl::random_internal::DistributionCaller; // for InvokeMock + friend class ::absl::BitGenRef; // for InvokeMock + friend class ::absl::random_internal::MockHelpers; // for RegisterMock, + // InvokeMock + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_MOCKING_BIT_GEN_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/poisson_distribution.h b/CAPI/cpp/grpc/include/absl/random/poisson_distribution.h new file mode 100644 index 00000000..05657c60 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/poisson_distribution.h @@ -0,0 +1,315 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_RANDOM_POISSON_DISTRIBUTION_H_ +#define ABSL_RANDOM_POISSON_DISTRIBUTION_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/random/internal/fastmath.h" +#include "absl/random/internal/generate_real.h" +#include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/internal/traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::poisson_distribution: + // Generates discrete variates conforming to a Poisson distribution. + // p(n) = (mean^n / n!) 
exp(-mean) + // + // Depending on the parameter, the distribution selects one of the following + // algorithms: + // * The standard algorithm, attributed to Knuth, extended using a split method + // for larger values + // * The "Ratio of Uniforms as a convenient method for sampling from classical + // discrete distributions", Stadlober, 1989. + // http://www.sciencedirect.com/science/article/pii/0377042790903495 + // + // NOTE: param_type.mean() is a double, which permits values larger than + // poisson_distribution::max(), however this should be avoided and + // the distribution results are limited to the max() value. + // + // The goals of this implementation are to provide good performance while still + // beig thread-safe: This limits the implementation to not using lgamma provided + // by . + // + template + class poisson_distribution + { + public: + using result_type = IntType; + + class param_type + { + public: + using distribution_type = poisson_distribution; + explicit param_type(double mean = 1.0); + + double mean() const + { + return mean_; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.mean_ == b.mean_; + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class poisson_distribution; + + double mean_; + double emu_; // e ^ -mean_ + double lmu_; // ln(mean_) + double s_; + double log_k_; + int split_; + + static_assert(random_internal::IsIntegral::value, "Class-template absl::poisson_distribution<> must be " + "parameterized using an integral type."); + }; + + poisson_distribution() : + poisson_distribution(1.0) + { + } + + explicit poisson_distribution(double mean) : + param_(mean) + { + } + + explicit poisson_distribution(const param_type& p) : + param_(p) + { + } + + void reset() + { + } + + // generating functions + template + result_type operator()(URBG& g) + { // NOLINT(runtime/references) + return (*this)(g, param_); + } + + template + 
result_type operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + param_type param() const + { + return param_; + } + void param(const param_type& p) + { + param_ = p; + } + + result_type(min)() const + { + return 0; + } + result_type(max)() const + { + return (std::numeric_limits::max)(); + } + + double mean() const + { + return param_.mean(); + } + + friend bool operator==(const poisson_distribution& a, const poisson_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const poisson_distribution& a, const poisson_distribution& b) + { + return a.param_ != b.param_; + } + + private: + param_type param_; + random_internal::FastUniformBits fast_u64_; + }; + + // ----------------------------------------------------------------------------- + // Implementation details follow + // ----------------------------------------------------------------------------- + + template + poisson_distribution::param_type::param_type(double mean) : + mean_(mean), + split_(0) + { + assert(mean >= 0); + assert(mean <= static_cast((std::numeric_limits::max)())); + // As a defensive measure, avoid large values of the mean. The rejection + // algorithm used does not support very large values well. It my be worth + // changing algorithms to better deal with these cases. + assert(mean <= 1e10); + if (mean_ < 10) + { + // For small lambda, use the knuth method. + split_ = 1; + emu_ = std::exp(-mean_); + } + else if (mean_ <= 50) + { + // Use split-knuth method. + split_ = 1 + static_cast(mean_ / 10.0); + emu_ = std::exp(-mean_ / static_cast(split_)); + } + else + { + // Use ratio of uniforms method. 
+ constexpr double k2E = 0.7357588823428846; + constexpr double kSA = 0.4494580810294493; + + lmu_ = std::log(mean_); + double a = mean_ + 0.5; + s_ = kSA + std::sqrt(k2E * a); + const double mode = std::ceil(mean_) - 1; + log_k_ = lmu_ * mode - absl::random_internal::StirlingLogFactorial(mode); + } + } + + template + template + typename poisson_distribution::result_type + poisson_distribution::operator()( + URBG& g, // NOLINT(runtime/references) + const param_type& p + ) + { + using random_internal::GeneratePositiveTag; + using random_internal::GenerateRealFromBits; + using random_internal::GenerateSignedTag; + + if (p.split_ != 0) + { + // Use Knuth's algorithm with range splitting to avoid floating-point + // errors. Knuth's algorithm is: Ui is a sequence of uniform variates on + // (0,1); return the number of variates required for product(Ui) < + // exp(-lambda). + // + // The expected number of variates required for Knuth's method can be + // computed as follows: + // The expected value of U is 0.5, so solving for 0.5^n < exp(-lambda) gives + // the expected number of uniform variates + // required for a given lambda, which is: + // lambda = [2, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17] + // n = [3, 8, 13, 15, 16, 18, 19, 21, 22, 24, 25] + // + result_type n = 0; + for (int split = p.split_; split > 0; --split) + { + double r = 1.0; + do + { + r *= GenerateRealFromBits( + fast_u64_(g) + ); // U(-1, 0) + ++n; + } while (r > p.emu_); + --n; + } + return n; + } + + // Use ratio of uniforms method. + // + // Let u ~ Uniform(0, 1), v ~ Uniform(-1, 1), + // a = lambda + 1/2, + // s = 1.5 - sqrt(3/e) + sqrt(2(lambda + 1/2)/e), + // x = s * v/u + a. + // P(floor(x) = k | u^2 < f(floor(x))/k), where + // f(m) = lambda^m exp(-lambda)/ m!, for 0 <= m, and f(m) = 0 otherwise, + // and k = max(f). 
+ const double a = p.mean_ + 0.5; + for (;;) + { + const double u = GenerateRealFromBits( + fast_u64_(g) + ); // U(0, 1) + const double v = GenerateRealFromBits( + fast_u64_(g) + ); // U(-1, 1) + + const double x = std::floor(p.s_ * v / u + a); + if (x < 0) + continue; // f(negative) = 0 + const double rhs = x * p.lmu_; + // clang-format off + double s = (x <= 1.0) ? 0.0 + : (x == 2.0) ? 0.693147180559945 + : absl::random_internal::StirlingLogFactorial(x); + // clang-format on + const double lhs = 2.0 * std::log(u) + p.log_k_ + s; + if (lhs < rhs) + { + return x > static_cast((max)()) ? (max)() : static_cast(x); // f(x)/k >= u^2 + } + } + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const poisson_distribution& x + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + os.precision(random_internal::stream_precision_helper::kPrecision); + os << x.mean(); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + poisson_distribution& x + ) + { // NOLINT(runtime/references) + using param_type = typename poisson_distribution::param_type; + + auto saver = random_internal::make_istream_state_saver(is); + double mean = random_internal::read_floating_point(is); + if (!is.fail()) + { + x.param(param_type(mean)); + } + return is; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_POISSON_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/random.h b/CAPI/cpp/grpc/include/absl/random/random.h new file mode 100644 index 00000000..0f973364 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/random.h @@ -0,0 +1,190 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: random.h +// ----------------------------------------------------------------------------- +// +// This header defines the recommended Uniform Random Bit Generator (URBG) +// types for use within the Abseil Random library. These types are not +// suitable for security-related use-cases, but should suffice for most other +// uses of generating random values. +// +// The Abseil random library provides the following URBG types: +// +// * BitGen, a good general-purpose bit generator, optimized for generating +// random (but not cryptographically secure) values +// * InsecureBitGen, a slightly faster, though less random, bit generator, for +// cases where the existing BitGen is a drag on performance. 
+ +#ifndef ABSL_RANDOM_RANDOM_H_ +#define ABSL_RANDOM_RANDOM_H_ + +#include + +#include "absl/random/distributions.h" // IWYU pragma: export +#include "absl/random/internal/nonsecure_base.h" // IWYU pragma: export +#include "absl/random/internal/pcg_engine.h" // IWYU pragma: export +#include "absl/random/internal/pool_urbg.h" +#include "absl/random/internal/randen_engine.h" +#include "absl/random/seed_sequences.h" // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // absl::BitGen + // ----------------------------------------------------------------------------- + // + // `absl::BitGen` is a general-purpose random bit generator for generating + // random values for use within the Abseil random library. Typically, you use a + // bit generator in combination with a distribution to provide random values. + // + // Example: + // + // // Create an absl::BitGen. There is no need to seed this bit generator. + // absl::BitGen gen; + // + // // Generate an integer value in the closed interval [1,6] + // int die_roll = absl::uniform_int_distribution(1, 6)(gen); + // + // `absl::BitGen` is seeded by default with non-deterministic data to produce + // different sequences of random values across different instances, including + // different binary invocations. This behavior is different than the standard + // library bit generators, which use golden values as their seeds. Default + // construction intentionally provides no stability guarantees, to avoid + // accidental dependence on such a property. + // + // `absl::BitGen` may be constructed with an optional seed sequence type, + // conforming to [rand.req.seed_seq], which will be mixed with additional + // non-deterministic data as detailed below. 
+ // + // Example: + // + // // Create an absl::BitGen using an std::seed_seq seed sequence + // std::seed_seq seq{1,2,3}; + // absl::BitGen gen_with_seed(seq); + // + // // Generate an integer value in the closed interval [1,6] + // int die_roll2 = absl::uniform_int_distribution(1, 6)(gen_with_seed); + // + // Constructing two `absl::BitGen`s with the same seed sequence in the same + // process will produce the same sequence of variates, but need not do so across + // multiple processes even if they're executing the same binary. + // + // `absl::BitGen` meets the requirements of the Uniform Random Bit Generator + // (URBG) concept as per the C++17 standard [rand.req.urng] though differs + // slightly with [rand.req.eng]. Like its standard library equivalents (e.g. + // `std::mersenne_twister_engine`) `absl::BitGen` is not cryptographically + // secure. + // + // This type has been optimized to perform better than Mersenne Twister + // (https://en.wikipedia.org/wiki/Mersenne_Twister) and many other complex URBG + // types on modern x86, ARM, and PPC architectures. + // + // This type is thread-compatible, but not thread-safe. + + // --------------------------------------------------------------------------- + // absl::BitGen member functions + // --------------------------------------------------------------------------- + + // absl::BitGen::operator()() + // + // Calls the BitGen, returning a generated value. + + // absl::BitGen::min() + // + // Returns the smallest possible value from this bit generator. + + // absl::BitGen::max() + // + // Returns the largest possible value from this bit generator. + + // absl::BitGen::discard(num) + // + // Advances the internal state of this bit generator by `num` times, and + // discards the intermediate results. 
+ // --------------------------------------------------------------------------- + + using BitGen = random_internal::NonsecureURBGBase< + random_internal::randen_engine>; + + // ----------------------------------------------------------------------------- + // absl::InsecureBitGen + // ----------------------------------------------------------------------------- + // + // `absl::InsecureBitGen` is an efficient random bit generator for generating + // random values, recommended only for performance-sensitive use cases where + // `absl::BitGen` is not satisfactory when compute-bounded by bit generation + // costs. + // + // Example: + // + // // Create an absl::InsecureBitGen + // absl::InsecureBitGen gen; + // for (size_t i = 0; i < 1000000; i++) { + // + // // Generate a bunch of random values from some complex distribution + // auto my_rnd = some_distribution(gen, 1, 1000); + // } + // + // Like `absl::BitGen`, `absl::InsecureBitGen` is seeded by default with + // non-deterministic data to produce different sequences of random values across + // different instances, including different binary invocations. (This behavior + // is different than the standard library bit generators, which use golden + // values as their seeds.) + // + // `absl::InsecureBitGen` may be constructed with an optional seed sequence + // type, conforming to [rand.req.seed_seq], which will be mixed with additional + // non-deterministic data, as detailed in the `absl::BitGen` comment. + // + // `absl::InsecureBitGen` meets the requirements of the Uniform Random Bit + // Generator (URBG) concept as per the C++17 standard [rand.req.urng] though + // its implementation differs slightly with [rand.req.eng]. Like its standard + // library equivalents (e.g. `std::mersenne_twister_engine`) + // `absl::InsecureBitGen` is not cryptographically secure. + // + // Prefer `absl::BitGen` over `absl::InsecureBitGen` as the general type is + // often fast enough for the vast majority of applications. 
+ + using InsecureBitGen = + random_internal::NonsecureURBGBase; + + // --------------------------------------------------------------------------- + // absl::InsecureBitGen member functions + // --------------------------------------------------------------------------- + + // absl::InsecureBitGen::operator()() + // + // Calls the InsecureBitGen, returning a generated value. + + // absl::InsecureBitGen::min() + // + // Returns the smallest possible value from this bit generator. + + // absl::InsecureBitGen::max() + // + // Returns the largest possible value from this bit generator. + + // absl::InsecureBitGen::discard(num) + // + // Advances the internal state of this bit generator by `num` times, and + // discards the intermediate results. + // --------------------------------------------------------------------------- + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_RANDOM_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/seed_gen_exception.h b/CAPI/cpp/grpc/include/absl/random/seed_gen_exception.h new file mode 100644 index 00000000..4c40da63 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/seed_gen_exception.h @@ -0,0 +1,58 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: seed_gen_exception.h +// ----------------------------------------------------------------------------- +// +// This header defines an exception class which may be thrown if unpredictable +// events prevent the derivation of suitable seed-material for constructing a +// bit generator conforming to [rand.req.urng] (eg. entropy cannot be read from +// /dev/urandom on a Unix-based system). +// +// Note: if exceptions are disabled, `std::terminate()` is called instead. + +#ifndef ABSL_RANDOM_SEED_GEN_EXCEPTION_H_ +#define ABSL_RANDOM_SEED_GEN_EXCEPTION_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + //------------------------------------------------------------------------------ + // SeedGenException + //------------------------------------------------------------------------------ + class SeedGenException : public std::exception + { + public: + SeedGenException() = default; + ~SeedGenException() override; + const char* what() const noexcept override; + }; + + namespace random_internal + { + + // throw delegator + [[noreturn]] void ThrowSeedGenException(); + + } // namespace random_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_SEED_GEN_EXCEPTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/seed_sequences.h b/CAPI/cpp/grpc/include/absl/random/seed_sequences.h new file mode 100644 index 00000000..ef694bfe --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/seed_sequences.h @@ -0,0 +1,115 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: seed_sequences.h +// ----------------------------------------------------------------------------- +// +// This header contains utilities for creating and working with seed sequences +// conforming to [rand.req.seedseq]. In general, direct construction of seed +// sequences is discouraged, but use-cases for construction of identical bit +// generators (using the same seed sequence) may be helpful (e.g. replaying a +// simulation whose state is derived from variates of a bit generator). + +#ifndef ABSL_RANDOM_SEED_SEQUENCES_H_ +#define ABSL_RANDOM_SEED_SEQUENCES_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/random/internal/salted_seed_seq.h" +#include "absl/random/internal/seed_material.h" +#include "absl/random/seed_gen_exception.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // absl::SeedSeq + // ----------------------------------------------------------------------------- + // + // `absl::SeedSeq` constructs a seed sequence according to [rand.req.seedseq] + // for use within bit generators. `absl::SeedSeq`, unlike `std::seed_seq` + // additionally salts the generated seeds with extra implementation-defined + // entropy. For that reason, you can use `absl::SeedSeq` in combination with + // standard library bit generators (e.g. `std::mt19937`) to introduce + // non-determinism in your seeds. 
+ // + // Example: + // + // absl::SeedSeq my_seed_seq({a, b, c}); + // std::mt19937 my_bitgen(my_seed_seq); + // + using SeedSeq = random_internal::SaltedSeedSeq; + + // ----------------------------------------------------------------------------- + // absl::CreateSeedSeqFrom(bitgen*) + // ----------------------------------------------------------------------------- + // + // Constructs a seed sequence conforming to [rand.req.seedseq] using variates + // produced by a provided bit generator. + // + // You should generally avoid direct construction of seed sequences, but + // use-cases for reuse of a seed sequence to construct identical bit generators + // may be helpful (eg. replaying a simulation whose state is derived from bit + // generator values). + // + // If bitgen == nullptr, then behavior is undefined. + // + // Example: + // + // absl::BitGen my_bitgen; + // auto seed_seq = absl::CreateSeedSeqFrom(&my_bitgen); + // absl::BitGen new_engine(seed_seq); // derived from my_bitgen, but not + // // correlated. + // + template + SeedSeq CreateSeedSeqFrom(URBG* urbg) + { + SeedSeq::result_type + seed_material[random_internal::kEntropyBlocksNeeded]; + + if (!random_internal::ReadSeedMaterialFromURBG( + urbg, absl::MakeSpan(seed_material) + )) + { + random_internal::ThrowSeedGenException(); + } + return SeedSeq(std::begin(seed_material), std::end(seed_material)); + } + + // ----------------------------------------------------------------------------- + // absl::MakeSeedSeq() + // ----------------------------------------------------------------------------- + // + // Constructs an `absl::SeedSeq` salting the generated values using + // implementation-defined entropy. The returned sequence can be used to create + // equivalent bit generators correlated using this sequence. 
+ // + // Example: + // + // auto my_seed_seq = absl::MakeSeedSeq(); + // std::mt19937 rng1(my_seed_seq); + // std::mt19937 rng2(my_seed_seq); + // EXPECT_EQ(rng1(), rng2()); + // + SeedSeq MakeSeedSeq(); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_SEED_SEQUENCES_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/uniform_int_distribution.h b/CAPI/cpp/grpc/include/absl/random/uniform_int_distribution.h new file mode 100644 index 00000000..25aa8e6a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/uniform_int_distribution.h @@ -0,0 +1,330 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: uniform_int_distribution.h +// ----------------------------------------------------------------------------- +// +// This header defines a class for representing a uniform integer distribution +// over the closed (inclusive) interval [a,b]. You use this distribution in +// combination with an Abseil random bit generator to produce random values +// according to the rules of the distribution. +// +// `absl::uniform_int_distribution` is a drop-in replacement for the C++11 +// `std::uniform_int_distribution` [rand.dist.uni.int] but is considerably +// faster than the libstdc++ implementation. 
+ +#ifndef ABSL_RANDOM_UNIFORM_INT_DISTRIBUTION_H_ +#define ABSL_RANDOM_UNIFORM_INT_DISTRIBUTION_H_ + +#include +#include +#include +#include + +#include "absl/base/optimization.h" +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/internal/traits.h" +#include "absl/random/internal/wide_multiply.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::uniform_int_distribution + // + // This distribution produces random integer values uniformly distributed in the + // closed (inclusive) interval [a, b]. + // + // Example: + // + // absl::BitGen gen; + // + // // Use the distribution to produce a value between 1 and 6, inclusive. + // int die_roll = absl::uniform_int_distribution(1, 6)(gen); + // + template + class uniform_int_distribution + { + private: + using unsigned_type = + typename random_internal::make_unsigned_bits::type; + + public: + using result_type = IntType; + + class param_type + { + public: + using distribution_type = uniform_int_distribution; + + explicit param_type( + result_type lo = 0, + result_type hi = (std::numeric_limits::max)() + ) : + lo_(lo), + range_(static_cast(hi) - static_cast(lo)) + { + // [rand.dist.uni.int] precondition 2 + assert(lo <= hi); + } + + result_type a() const + { + return lo_; + } + result_type b() const + { + return static_cast(static_cast(lo_) + range_); + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.lo_ == b.lo_ && a.range_ == b.range_; + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class uniform_int_distribution; + unsigned_type range() const + { + return range_; + } + + result_type lo_; + unsigned_type range_; + + static_assert(random_internal::IsIntegral::value, "Class-template absl::uniform_int_distribution<> must be " + "parameterized using an integral type."); + }; // param_type + + uniform_int_distribution() : + 
uniform_int_distribution(0) + { + } + + explicit uniform_int_distribution( + result_type lo, + result_type hi = (std::numeric_limits::max)() + ) : + param_(lo, hi) + { + } + + explicit uniform_int_distribution(const param_type& param) : + param_(param) + { + } + + // uniform_int_distribution::reset() + // + // Resets the uniform int distribution. Note that this function has no effect + // because the distribution already produces independent values. + void reset() + { + } + + template + result_type operator()(URBG& gen) + { // NOLINT(runtime/references) + return (*this)(gen, param()); + } + + template + result_type operator()( + URBG& gen, const param_type& param + ) + { // NOLINT(runtime/references) + return static_cast(param.a() + Generate(gen, param.range())); + } + + result_type a() const + { + return param_.a(); + } + result_type b() const + { + return param_.b(); + } + + param_type param() const + { + return param_; + } + void param(const param_type& params) + { + param_ = params; + } + + result_type(min)() const + { + return a(); + } + result_type(max)() const + { + return b(); + } + + friend bool operator==(const uniform_int_distribution& a, const uniform_int_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const uniform_int_distribution& a, const uniform_int_distribution& b) + { + return !(a == b); + } + + private: + // Generates a value in the *closed* interval [0, R] + template + unsigned_type Generate(URBG& g, // NOLINT(runtime/references) + unsigned_type R); + param_type param_; + }; + + // ----------------------------------------------------------------------------- + // Implementation details follow + // ----------------------------------------------------------------------------- + template + std::basic_ostream& operator<<( + std::basic_ostream& os, + const uniform_int_distribution& x + ) + { + using stream_type = + typename random_internal::stream_format_type::type; + auto saver = 
random_internal::make_ostream_state_saver(os); + os << static_cast(x.a()) << os.fill() + << static_cast(x.b()); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, + uniform_int_distribution& x + ) + { + using param_type = typename uniform_int_distribution::param_type; + using result_type = typename uniform_int_distribution::result_type; + using stream_type = + typename random_internal::stream_format_type::type; + + stream_type a; + stream_type b; + + auto saver = random_internal::make_istream_state_saver(is); + is >> a >> b; + if (!is.fail()) + { + x.param( + param_type(static_cast(a), static_cast(b)) + ); + } + return is; + } + + template + template + typename random_internal::make_unsigned_bits::type + uniform_int_distribution::Generate( + URBG& g, // NOLINT(runtime/references) + typename random_internal::make_unsigned_bits::type R + ) + { + random_internal::FastUniformBits fast_bits; + unsigned_type bits = fast_bits(g); + const unsigned_type Lim = R + 1; + if ((R & Lim) == 0) + { + // If the interval's length is a power of two range, just take the low bits. + return bits & R; + } + + // Generates a uniform variate on [0, Lim) using fixed-point multiplication. + // The above fast-path guarantees that Lim is representable in unsigned_type. + // + // Algorithm adapted from + // http://lemire.me/blog/2016/06/30/fast-random-shuffling/, with added + // explanation. + // + // The algorithm creates a uniform variate `bits` in the interval [0, 2^N), + // and treats it as the fractional part of a fixed-point real value in [0, 1), + // multiplied by 2^N. For example, 0.25 would be represented as 2^(N - 2), + // because 2^N * 0.25 == 2^(N - 2). + // + // Next, `bits` and `Lim` are multiplied with a wide-multiply to bring the + // value into the range [0, Lim). The integral part (the high word of the + // multiplication result) is then very nearly the desired result. 
However, + // this is not quite accurate; viewing the multiplication result as one + // double-width integer, the resulting values for the sample are mapped as + // follows: + // + // If the result lies in this interval: Return this value: + // [0, 2^N) 0 + // [2^N, 2 * 2^N) 1 + // ... ... + // [K * 2^N, (K + 1) * 2^N) K + // ... ... + // [(Lim - 1) * 2^N, Lim * 2^N) Lim - 1 + // + // While all of these intervals have the same size, the result of `bits * Lim` + // must be a multiple of `Lim`, and not all of these intervals contain the + // same number of multiples of `Lim`. In particular, some contain + // `F = floor(2^N / Lim)` and some contain `F + 1 = ceil(2^N / Lim)`. This + // difference produces a small nonuniformity, which is corrected by applying + // rejection sampling to one of the values in the "larger intervals" (i.e., + // the intervals containing `F + 1` multiples of `Lim`. + // + // An interval contains `F + 1` multiples of `Lim` if and only if its smallest + // value modulo 2^N is less than `2^N % Lim`. The unique value satisfying + // this property is used as the one for rejection. That is, a value of + // `bits * Lim` is rejected if `(bit * Lim) % 2^N < (2^N % Lim)`. + + using helper = random_internal::wide_multiply; + auto product = helper::multiply(bits, Lim); + + // Two optimizations here: + // * Rejection occurs with some probability less than 1/2, and for reasonable + // ranges considerably less (in particular, less than 1/(F+1)), so + // ABSL_PREDICT_FALSE is apt. + // * `Lim` is an overestimate of `threshold`, and doesn't require a divide. + if (ABSL_PREDICT_FALSE(helper::lo(product) < Lim)) + { + // This quantity is exactly equal to `2^N % Lim`, but does not require high + // precision calculations: `2^N % Lim` is congruent to `(2^N - Lim) % Lim`. + // Ideally this could be expressed simply as `-X` rather than `2^N - X`, but + // for types smaller than int, this calculation is incorrect due to integer + // promotion rules. 
+ const unsigned_type threshold = + ((std::numeric_limits::max)() - Lim + 1) % Lim; + while (helper::lo(product) < threshold) + { + bits = fast_bits(g); + product = helper::multiply(bits, Lim); + } + } + + return helper::hi(product); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_UNIFORM_INT_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/uniform_real_distribution.h b/CAPI/cpp/grpc/include/absl/random/uniform_real_distribution.h new file mode 100644 index 00000000..8bc215be --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/uniform_real_distribution.h @@ -0,0 +1,255 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: uniform_real_distribution.h +// ----------------------------------------------------------------------------- +// +// This header defines a class for representing a uniform floating-point +// distribution over a half-open interval [a,b). You use this distribution in +// combination with an Abseil random bit generator to produce random values +// according to the rules of the distribution. +// +// `absl::uniform_real_distribution` is a drop-in replacement for the C++11 +// `std::uniform_real_distribution` [rand.dist.uni.real] but is considerably +// faster than the libstdc++ implementation. 
+// +// Note: the standard-library version may occasionally return `1.0` when +// default-initialized. See https://bugs.llvm.org//show_bug.cgi?id=18767 +// `absl::uniform_real_distribution` does not exhibit this behavior. + +#ifndef ABSL_RANDOM_UNIFORM_REAL_DISTRIBUTION_H_ +#define ABSL_RANDOM_UNIFORM_REAL_DISTRIBUTION_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/random/internal/fast_uniform_bits.h" +#include "absl/random/internal/generate_real.h" +#include "absl/random/internal/iostream_state_saver.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::uniform_real_distribution + // + // This distribution produces random floating-point values uniformly distributed + // over the half-open interval [a, b). + // + // Example: + // + // absl::BitGen gen; + // + // // Use the distribution to produce a value between 0.0 (inclusive) + // // and 1.0 (exclusive). + // double value = absl::uniform_real_distribution(0, 1)(gen); + // + template + class uniform_real_distribution + { + public: + using result_type = RealType; + + class param_type + { + public: + using distribution_type = uniform_real_distribution; + + explicit param_type(result_type lo = 0, result_type hi = 1) : + lo_(lo), + hi_(hi), + range_(hi - lo) + { + // [rand.dist.uni.real] preconditions 2 & 3 + assert(lo <= hi); + + // NOTE: For integral types, we can promote the range to an unsigned type, + // which gives full width of the range. However for real (fp) types, this + // is not possible, so value generation cannot use the full range of the + // real type. 
+ assert(range_ <= (std::numeric_limits::max)()); + } + + result_type a() const + { + return lo_; + } + result_type b() const + { + return hi_; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.lo_ == b.lo_ && a.hi_ == b.hi_; + } + + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class uniform_real_distribution; + result_type lo_, hi_, range_; + + static_assert(std::is_floating_point::value, "Class-template absl::uniform_real_distribution<> must be " + "parameterized using a floating-point type."); + }; + + uniform_real_distribution() : + uniform_real_distribution(0) + { + } + + explicit uniform_real_distribution(result_type lo, result_type hi = 1) : + param_(lo, hi) + { + } + + explicit uniform_real_distribution(const param_type& param) : + param_(param) + { + } + + // uniform_real_distribution::reset() + // + // Resets the uniform real distribution. Note that this function has no effect + // because the distribution already produces independent values. 
+ void reset() + { + } + + template + result_type operator()(URBG& gen) + { // NOLINT(runtime/references) + return operator()(gen, param_); + } + + template + result_type operator()(URBG& gen, // NOLINT(runtime/references) + const param_type& p); + + result_type a() const + { + return param_.a(); + } + result_type b() const + { + return param_.b(); + } + + param_type param() const + { + return param_; + } + void param(const param_type& params) + { + param_ = params; + } + + result_type(min)() const + { + return a(); + } + result_type(max)() const + { + return b(); + } + + friend bool operator==(const uniform_real_distribution& a, const uniform_real_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const uniform_real_distribution& a, const uniform_real_distribution& b) + { + return a.param_ != b.param_; + } + + private: + param_type param_; + random_internal::FastUniformBits fast_u64_; + }; + + // ----------------------------------------------------------------------------- + // Implementation details follow + // ----------------------------------------------------------------------------- + template + template + typename uniform_real_distribution::result_type + uniform_real_distribution::operator()( + URBG& gen, const param_type& p + ) + { // NOLINT(runtime/references) + using random_internal::GeneratePositiveTag; + using random_internal::GenerateRealFromBits; + using real_type = + absl::conditional_t::value, float, double>; + + while (true) + { + const result_type sample = + GenerateRealFromBits( + fast_u64_(gen) + ); + const result_type res = p.a() + (sample * p.range_); + if (res < p.b() || p.range_ <= 0 || !std::isfinite(p.range_)) + { + return res; + } + // else sample rejected, try again. 
+ } + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const uniform_real_distribution& x + ) + { + auto saver = random_internal::make_ostream_state_saver(os); + os.precision(random_internal::stream_precision_helper::kPrecision); + os << x.a() << os.fill() << x.b(); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + uniform_real_distribution& x + ) + { // NOLINT(runtime/references) + using param_type = typename uniform_real_distribution::param_type; + using result_type = typename uniform_real_distribution::result_type; + auto saver = random_internal::make_istream_state_saver(is); + auto a = random_internal::read_floating_point(is); + if (is.fail()) + return is; + auto b = random_internal::read_floating_point(is); + if (!is.fail()) + { + x.param(param_type(a, b)); + } + return is; + } + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_UNIFORM_REAL_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/random/zipf_distribution.h b/CAPI/cpp/grpc/include/absl/random/zipf_distribution.h new file mode 100644 index 00000000..37c1c090 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/random/zipf_distribution.h @@ -0,0 +1,337 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_RANDOM_ZIPF_DISTRIBUTION_H_ +#define ABSL_RANDOM_ZIPF_DISTRIBUTION_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/random/internal/iostream_state_saver.h" +#include "absl/random/internal/traits.h" +#include "absl/random/uniform_real_distribution.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::zipf_distribution produces random integer-values in the range [0, k], + // distributed according to the unnormalized discrete probability function: + // + // P(x) = (v + x) ^ -q + // + // The parameter `v` must be greater than 0 and the parameter `q` must be + // greater than 1. If either of these parameters take invalid values then the + // behavior is undefined. + // + // IntType is the result_type generated by the generator. It must be of integral + // type; a static_assert ensures this is the case. + // + // The implementation is based on W.Hormann, G.Derflinger: + // + // "Rejection-Inversion to Generate Variates from Monotone Discrete + // Distributions" + // + // http://eeyore.wu-wien.ac.at/papers/96-04-04.wh-der.ps.gz + // + template + class zipf_distribution + { + public: + using result_type = IntType; + + class param_type + { + public: + using distribution_type = zipf_distribution; + + // Preconditions: k > 0, v > 0, q > 1 + // The precondidtions are validated when NDEBUG is not defined via + // a pair of assert() directives. + // If NDEBUG is defined and either or both of these parameters take invalid + // values, the behavior of the class is undefined. 
+ explicit param_type(result_type k = (std::numeric_limits::max)(), double q = 2.0, double v = 1.0); + + result_type k() const + { + return k_; + } + double q() const + { + return q_; + } + double v() const + { + return v_; + } + + friend bool operator==(const param_type& a, const param_type& b) + { + return a.k_ == b.k_ && a.q_ == b.q_ && a.v_ == b.v_; + } + friend bool operator!=(const param_type& a, const param_type& b) + { + return !(a == b); + } + + private: + friend class zipf_distribution; + inline double h(double x) const; + inline double hinv(double x) const; + inline double compute_s() const; + inline double pow_negative_q(double x) const; + + // Parameters here are exactly the same as the parameters of Algorithm ZRI + // in the paper. + IntType k_; + double q_; + double v_; + + double one_minus_q_; // 1-q + double s_; + double one_minus_q_inv_; // 1 / 1-q + double hxm_; // h(k + 0.5) + double hx0_minus_hxm_; // h(x0) - h(k + 0.5) + + static_assert(random_internal::IsIntegral::value, "Class-template absl::zipf_distribution<> must be " + "parameterized using an integral type."); + }; + + zipf_distribution() : + zipf_distribution((std::numeric_limits::max)()) + { + } + + explicit zipf_distribution(result_type k, double q = 2.0, double v = 1.0) : + param_(k, q, v) + { + } + + explicit zipf_distribution(const param_type& p) : + param_(p) + { + } + + void reset() + { + } + + template + result_type operator()(URBG& g) + { // NOLINT(runtime/references) + return (*this)(g, param_); + } + + template + result_type operator()(URBG& g, // NOLINT(runtime/references) + const param_type& p); + + result_type k() const + { + return param_.k(); + } + double q() const + { + return param_.q(); + } + double v() const + { + return param_.v(); + } + + param_type param() const + { + return param_; + } + void param(const param_type& p) + { + param_ = p; + } + + result_type(min)() const + { + return 0; + } + result_type(max)() const + { + return k(); + } + + friend bool 
operator==(const zipf_distribution& a, const zipf_distribution& b) + { + return a.param_ == b.param_; + } + friend bool operator!=(const zipf_distribution& a, const zipf_distribution& b) + { + return a.param_ != b.param_; + } + + private: + param_type param_; + }; + + // -------------------------------------------------------------------------- + // Implementation details follow + // -------------------------------------------------------------------------- + + template + zipf_distribution::param_type::param_type( + typename zipf_distribution::result_type k, double q, double v + ) : + k_(k), + q_(q), + v_(v), + one_minus_q_(1 - q) + { + assert(q > 1); + assert(v > 0); + assert(k > 0); + one_minus_q_inv_ = 1 / one_minus_q_; + + // Setup for the ZRI algorithm (pg 17 of the paper). + // Compute: h(i max) => h(k + 0.5) + constexpr double kMax = 18446744073709549568.0; + double kd = static_cast(k); + // TODO(absl-team): Determine if this check is needed, and if so, add a test + // that fails for k > kMax + if (kd > kMax) + { + // Ensure that our maximum value is capped to a value which will + // round-trip back through double. + kd = kMax; + } + hxm_ = h(kd + 0.5); + + // Compute: h(0) + const bool use_precomputed = (v == 1.0 && q == 2.0); + const double h0x5 = use_precomputed ? (-1.0 / 1.5) // exp(-log(1.5)) + : + h(0.5); + const double elogv_q = (v_ == 1.0) ? 1 : pow_negative_q(v_); + + // h(0) = h(0.5) - exp(log(v) * -q) + hx0_minus_hxm_ = (h0x5 - elogv_q) - hxm_; + + // And s + s_ = use_precomputed ? 0.46153846153846123 : compute_s(); + } + + template + double zipf_distribution::param_type::h(double x) const + { + // std::exp(one_minus_q_ * std::log(v_ + x)) * one_minus_q_inv_; + x += v_; + return (one_minus_q_ == -1.0) ? 
(-1.0 / x) // -exp(-log(x)) + : + (std::exp(std::log(x) * one_minus_q_) * one_minus_q_inv_); + } + + template + double zipf_distribution::param_type::hinv(double x) const + { + // std::exp(one_minus_q_inv_ * std::log(one_minus_q_ * x)) - v_; + return -v_ + ((one_minus_q_ == -1.0) ? (-1.0 / x) // exp(-log(-x)) + : + std::exp(one_minus_q_inv_ * std::log(one_minus_q_ * x))); + } + + template + double zipf_distribution::param_type::compute_s() const + { + // 1 - hinv(h(1.5) - std::exp(std::log(v_ + 1) * -q_)); + return 1.0 - hinv(h(1.5) - pow_negative_q(v_ + 1.0)); + } + + template + double zipf_distribution::param_type::pow_negative_q(double x) const + { + // std::exp(std::log(x) * -q_); + return q_ == 2.0 ? (1.0 / (x * x)) : std::exp(std::log(x) * -q_); + } + + template + template + typename zipf_distribution::result_type + zipf_distribution::operator()( + URBG& g, const param_type& p + ) + { // NOLINT(runtime/references) + absl::uniform_real_distribution uniform_double; + double k; + for (;;) + { + const double v = uniform_double(g); + const double u = p.hxm_ + v * p.hx0_minus_hxm_; + const double x = p.hinv(u); + k = rint(x); // std::floor(x + 0.5); + if (k > static_cast(p.k())) + continue; // reject k > max_k + if (k - x <= p.s_) + break; + const double h = p.h(k + 0.5); + const double r = p.pow_negative_q(p.v_ + k); + if (u >= h - r) + break; + } + IntType ki = static_cast(k); + assert(ki <= p.k_); + return ki; + } + + template + std::basic_ostream& operator<<( + std::basic_ostream& os, // NOLINT(runtime/references) + const zipf_distribution& x + ) + { + using stream_type = + typename random_internal::stream_format_type::type; + auto saver = random_internal::make_ostream_state_saver(os); + os.precision(random_internal::stream_precision_helper::kPrecision); + os << static_cast(x.k()) << os.fill() << x.q() << os.fill() + << x.v(); + return os; + } + + template + std::basic_istream& operator>>( + std::basic_istream& is, // NOLINT(runtime/references) + 
zipf_distribution& x + ) + { // NOLINT(runtime/references) + using result_type = typename zipf_distribution::result_type; + using param_type = typename zipf_distribution::param_type; + using stream_type = + typename random_internal::stream_format_type::type; + stream_type k; + double q; + double v; + + auto saver = random_internal::make_istream_state_saver(is); + is >> k >> q >> v; + if (!is.fail()) + { + x.param(param_type(static_cast(k), q, v)); + } + return is; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_RANDOM_ZIPF_DISTRIBUTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/status/internal/status_internal.h b/CAPI/cpp/grpc/include/absl/status/internal/status_internal.h new file mode 100644 index 00000000..7fa2e530 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/status/internal/status_internal.h @@ -0,0 +1,96 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#ifndef ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_ +#define ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_ + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/container/inlined_vector.h" +#include "absl/strings/cord.h" + +#ifndef SWIG +// Disabled for SWIG as it doesn't parse attributes correctly. +namespace absl +{ + ABSL_NAMESPACE_BEGIN +// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs +// as part of a class definitions (b/6995610), so we use a forward declaration. 
+// +// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict +// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available. +#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) + class [[nodiscard]] Status; +#else + class ABSL_MUST_USE_RESULT Status; +#endif + ABSL_NAMESPACE_END +} // namespace absl +#endif // !SWIG + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + enum class StatusCode : int; + + namespace status_internal + { + + // Container for status payloads. + struct Payload + { + std::string type_url; + absl::Cord payload; + }; + + using Payloads = absl::InlinedVector; + + // Reference-counted representation of Status data. + struct StatusRep + { + StatusRep(absl::StatusCode code_arg, absl::string_view message_arg, std::unique_ptr payloads_arg) : + ref(int32_t{1}), + code(code_arg), + message(message_arg), + payloads(std::move(payloads_arg)) + { + } + + std::atomic ref; + absl::StatusCode code; + + // As an internal implementation detail, we guarantee that if status.message() + // is non-empty, then the resulting string_view is null terminated. + // This is required to implement 'StatusMessageAsCStr(...)' + std::string message; + std::unique_ptr payloads; + }; + + absl::StatusCode MapToLocalCode(int value); + + // Returns a pointer to a newly-allocated string with the given `prefix`, + // suitable for output as an error message in assertion/`CHECK()` failures. + // + // This is an internal implementation detail for Abseil logging. 
+ std::string* MakeCheckFailString(const absl::Status* status, const char* prefix); + + } // namespace status_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/status/internal/statusor_internal.h b/CAPI/cpp/grpc/include/absl/status/internal/statusor_internal.h new file mode 100644 index 00000000..0e4ce0d4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/status/internal/statusor_internal.h @@ -0,0 +1,436 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#ifndef ABSL_STATUS_INTERNAL_STATUSOR_INTERNAL_H_ +#define ABSL_STATUS_INTERNAL_STATUSOR_INTERNAL_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/meta/type_traits.h" +#include "absl/status/status.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + template + class ABSL_MUST_USE_RESULT StatusOr; + + namespace internal_statusor + { + + // Detects whether `U` has conversion operator to `StatusOr`, i.e. `operator + // StatusOr()`. + template + struct HasConversionOperatorToStatusOr : std::false_type + { + }; + + template + void test(char (*)[sizeof(std::declval().operator absl::StatusOr())]); + + template + struct HasConversionOperatorToStatusOr(0))> : std::true_type + { + }; + + // Detects whether `T` is constructible or convertible from `StatusOr`. 
+ template + using IsConstructibleOrConvertibleFromStatusOr = + absl::disjunction&>, std::is_constructible&>, std::is_constructible&&>, std::is_constructible&&>, std::is_convertible&, T>, std::is_convertible&, T>, std::is_convertible&&, T>, std::is_convertible&&, T>>; + + // Detects whether `T` is constructible or convertible or assignable from + // `StatusOr`. + template + using IsConstructibleOrConvertibleOrAssignableFromStatusOr = + absl::disjunction, std::is_assignable&>, std::is_assignable&>, std::is_assignable&&>, std::is_assignable&&>>; + + // Detects whether direct initializing `StatusOr` from `U` is ambiguous, i.e. + // when `U` is `StatusOr` and `T` is constructible or convertible from `V`. + template + struct IsDirectInitializationAmbiguous : public absl::conditional_t, U>::value, std::false_type, IsDirectInitializationAmbiguous>> + { + }; + + template + struct IsDirectInitializationAmbiguous> : public IsConstructibleOrConvertibleFromStatusOr + { + }; + + // Checks against the constraints of the direction initialization, i.e. when + // `StatusOr::StatusOr(U&&)` should participate in overload resolution. + template + using IsDirectInitializationValid = absl::disjunction< + // Short circuits if T is basically U. + std::is_same>, + absl::negation, absl::remove_cvref_t>, + std::is_same>, + std::is_same>, + IsDirectInitializationAmbiguous>>>; + + // This trait detects whether `StatusOr::operator=(U&&)` is ambiguous, which + // is equivalent to whether all the following conditions are met: + // 1. `U` is `StatusOr`. + // 2. `T` is constructible and assignable from `V`. + // 3. `T` is constructible and assignable from `U` (i.e. `StatusOr`). + // For example, the following code is considered ambiguous: + // (`T` is `bool`, `U` is `StatusOr`, `V` is `bool`) + // StatusOr s1 = true; // s1.ok() && s1.ValueOrDie() == true + // StatusOr s2 = false; // s2.ok() && s2.ValueOrDie() == false + // s1 = s2; // ambiguous, `s1 = s2.ValueOrDie()` or `s1 = bool(s2)`? 
+ template + struct IsForwardingAssignmentAmbiguous : public absl::conditional_t, U>::value, std::false_type, IsForwardingAssignmentAmbiguous>> + { + }; + + template + struct IsForwardingAssignmentAmbiguous> : public IsConstructibleOrConvertibleOrAssignableFromStatusOr + { + }; + + // Checks against the constraints of the forwarding assignment, i.e. whether + // `StatusOr::operator(U&&)` should participate in overload resolution. + template + using IsForwardingAssignmentValid = absl::disjunction< + // Short circuits if T is basically U. + std::is_same>, + absl::negation, absl::remove_cvref_t>, + std::is_same>, + std::is_same>, + IsForwardingAssignmentAmbiguous>>>; + + class Helper + { + public: + // Move type-agnostic error handling to the .cc. + static void HandleInvalidStatusCtorArg(Status*); + ABSL_ATTRIBUTE_NORETURN static void Crash(const absl::Status& status); + }; + + // Construct an instance of T in `p` through placement new, passing Args... to + // the constructor. + // This abstraction is here mostly for the gcc performance fix. + template + ABSL_ATTRIBUTE_NONNULL(1) + void PlacementNew(void* p, Args&&... args) + { + new (p) T(std::forward(args)...); + } + + // Helper base class to hold the data and all operations. + // We move all this to a base class to allow mixing with the appropriate + // TraitsBase specialization. 
+ template + class StatusOrData + { + template + friend class StatusOrData; + + public: + StatusOrData() = delete; + + StatusOrData(const StatusOrData& other) + { + if (other.ok()) + { + MakeValue(other.data_); + MakeStatus(); + } + else + { + MakeStatus(other.status_); + } + } + + StatusOrData(StatusOrData&& other) noexcept + { + if (other.ok()) + { + MakeValue(std::move(other.data_)); + MakeStatus(); + } + else + { + MakeStatus(std::move(other.status_)); + } + } + + template + explicit StatusOrData(const StatusOrData& other) + { + if (other.ok()) + { + MakeValue(other.data_); + MakeStatus(); + } + else + { + MakeStatus(other.status_); + } + } + + template + explicit StatusOrData(StatusOrData&& other) + { + if (other.ok()) + { + MakeValue(std::move(other.data_)); + MakeStatus(); + } + else + { + MakeStatus(std::move(other.status_)); + } + } + + template + explicit StatusOrData(absl::in_place_t, Args&&... args) : + data_(std::forward(args)...) + { + MakeStatus(); + } + + explicit StatusOrData(const T& value) : + data_(value) + { + MakeStatus(); + } + explicit StatusOrData(T&& value) : + data_(std::move(value)) + { + MakeStatus(); + } + + template::value, int> = 0> + explicit StatusOrData(U&& v) : + status_(std::forward(v)) + { + EnsureNotOk(); + } + + StatusOrData& operator=(const StatusOrData& other) + { + if (this == &other) + return *this; + if (other.ok()) + Assign(other.data_); + else + AssignStatus(other.status_); + return *this; + } + + StatusOrData& operator=(StatusOrData&& other) + { + if (this == &other) + return *this; + if (other.ok()) + Assign(std::move(other.data_)); + else + AssignStatus(std::move(other.status_)); + return *this; + } + + ~StatusOrData() + { + if (ok()) + { + status_.~Status(); + data_.~T(); + } + else + { + status_.~Status(); + } + } + + template + void Assign(U&& value) + { + if (ok()) + { + data_ = std::forward(value); + } + else + { + MakeValue(std::forward(value)); + status_ = OkStatus(); + } + } + + template + void 
AssignStatus(U&& v) + { + Clear(); + status_ = static_cast(std::forward(v)); + EnsureNotOk(); + } + + bool ok() const + { + return status_.ok(); + } + + protected: + // status_ will always be active after the constructor. + // We make it a union to be able to initialize exactly how we need without + // waste. + // Eg. in the copy constructor we use the default constructor of Status in + // the ok() path to avoid an extra Ref call. + union + { + Status status_; + }; + + // data_ is active iff status_.ok()==true + struct Dummy + { + }; + union + { + // When T is const, we need some non-const object we can cast to void* for + // the placement new. dummy_ is that object. + Dummy dummy_; + T data_; + }; + + void Clear() + { + if (ok()) + data_.~T(); + } + + void EnsureOk() const + { + if (ABSL_PREDICT_FALSE(!ok())) + Helper::Crash(status_); + } + + void EnsureNotOk() + { + if (ABSL_PREDICT_FALSE(ok())) + Helper::HandleInvalidStatusCtorArg(&status_); + } + + // Construct the value (ie. data_) through placement new with the passed + // argument. + template + void MakeValue(Arg&&... arg) + { + internal_statusor::PlacementNew(&dummy_, std::forward(arg)...); + } + + // Construct the status (ie. status_) through placement new with the passed + // argument. + template + void MakeStatus(Args&&... args) + { + internal_statusor::PlacementNew(&status_, std::forward(args)...); + } + }; + + // Helper base classes to allow implicitly deleted constructors and assignment + // operators in `StatusOr`. For example, `CopyCtorBase` will explicitly delete + // the copy constructor when T is not copy constructible and `StatusOr` will + // inherit that behavior implicitly. 
+ template::value> + struct CopyCtorBase + { + CopyCtorBase() = default; + CopyCtorBase(const CopyCtorBase&) = default; + CopyCtorBase(CopyCtorBase&&) = default; + CopyCtorBase& operator=(const CopyCtorBase&) = default; + CopyCtorBase& operator=(CopyCtorBase&&) = default; + }; + + template + struct CopyCtorBase + { + CopyCtorBase() = default; + CopyCtorBase(const CopyCtorBase&) = delete; + CopyCtorBase(CopyCtorBase&&) = default; + CopyCtorBase& operator=(const CopyCtorBase&) = default; + CopyCtorBase& operator=(CopyCtorBase&&) = default; + }; + + template::value> + struct MoveCtorBase + { + MoveCtorBase() = default; + MoveCtorBase(const MoveCtorBase&) = default; + MoveCtorBase(MoveCtorBase&&) = default; + MoveCtorBase& operator=(const MoveCtorBase&) = default; + MoveCtorBase& operator=(MoveCtorBase&&) = default; + }; + + template + struct MoveCtorBase + { + MoveCtorBase() = default; + MoveCtorBase(const MoveCtorBase&) = default; + MoveCtorBase(MoveCtorBase&&) = delete; + MoveCtorBase& operator=(const MoveCtorBase&) = default; + MoveCtorBase& operator=(MoveCtorBase&&) = default; + }; + + template::value&& std::is_copy_assignable::value> + struct CopyAssignBase + { + CopyAssignBase() = default; + CopyAssignBase(const CopyAssignBase&) = default; + CopyAssignBase(CopyAssignBase&&) = default; + CopyAssignBase& operator=(const CopyAssignBase&) = default; + CopyAssignBase& operator=(CopyAssignBase&&) = default; + }; + + template + struct CopyAssignBase + { + CopyAssignBase() = default; + CopyAssignBase(const CopyAssignBase&) = default; + CopyAssignBase(CopyAssignBase&&) = default; + CopyAssignBase& operator=(const CopyAssignBase&) = delete; + CopyAssignBase& operator=(CopyAssignBase&&) = default; + }; + + template::value&& std::is_move_assignable::value> + struct MoveAssignBase + { + MoveAssignBase() = default; + MoveAssignBase(const MoveAssignBase&) = default; + MoveAssignBase(MoveAssignBase&&) = default; + MoveAssignBase& operator=(const MoveAssignBase&) = default; + 
MoveAssignBase& operator=(MoveAssignBase&&) = default; + }; + + template + struct MoveAssignBase + { + MoveAssignBase() = default; + MoveAssignBase(const MoveAssignBase&) = default; + MoveAssignBase(MoveAssignBase&&) = default; + MoveAssignBase& operator=(const MoveAssignBase&) = default; + MoveAssignBase& operator=(MoveAssignBase&&) = delete; + }; + + ABSL_ATTRIBUTE_NORETURN void ThrowBadStatusOrAccess(absl::Status status); + + } // namespace internal_statusor + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STATUS_INTERNAL_STATUSOR_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/status/status.h b/CAPI/cpp/grpc/include/absl/status/status.h new file mode 100644 index 00000000..76e60ae4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/status/status.h @@ -0,0 +1,954 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: status.h +// ----------------------------------------------------------------------------- +// +// This header file defines the Abseil `status` library, consisting of: +// +// * An `absl::Status` class for holding error handling information +// * A set of canonical `absl::StatusCode` error codes, and associated +// utilities for generating and propagating status codes. 
+// * A set of helper functions for creating status codes and checking their +// values +// +// Within Google, `absl::Status` is the primary mechanism for communicating +// errors in C++, and is used to represent error state in both in-process +// library calls as well as RPC calls. Some of these errors may be recoverable, +// but others may not. Most functions that can produce a recoverable error +// should be designed to return an `absl::Status` (or `absl::StatusOr`). +// +// Example: +// +// absl::Status myFunction(absl::string_view fname, ...) { +// ... +// // encounter error +// if (error condition) { +// return absl::InvalidArgumentError("bad mode"); +// } +// // else, return OK +// return absl::OkStatus(); +// } +// +// An `absl::Status` is designed to either return "OK" or one of a number of +// different error codes, corresponding to typical error conditions. +// In almost all cases, when using `absl::Status` you should use the canonical +// error codes (of type `absl::StatusCode`) enumerated in this header file. +// These canonical codes are understood across the codebase and will be +// accepted across all API and RPC boundaries. +#ifndef ABSL_STATUS_STATUS_H_ +#define ABSL_STATUS_STATUS_H_ + +#include +#include +#include + +#include "absl/functional/function_ref.h" +#include "absl/status/internal/status_internal.h" +#include "absl/strings/cord.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::StatusCode + // + // An `absl::StatusCode` is an enumerated type indicating either no error ("OK") + // or an error condition. In most cases, an `absl::Status` indicates a + // recoverable error, and the purpose of signalling an error is to indicate what + // action to take in response to that error. These error codes map to the proto + // RPC error codes indicated in https://cloud.google.com/apis/design/errors. 
+ // + // The errors listed below are the canonical errors associated with + // `absl::Status` and are used throughout the codebase. As a result, these + // error codes are somewhat generic. + // + // In general, try to return the most specific error that applies if more than + // one error may pertain. For example, prefer `kOutOfRange` over + // `kFailedPrecondition` if both codes apply. Similarly prefer `kNotFound` or + // `kAlreadyExists` over `kFailedPrecondition`. + // + // Because these errors may cross RPC boundaries, these codes are tied to the + // `google.rpc.Code` definitions within + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto + // The string value of these RPC codes is denoted within each enum below. + // + // If your error handling code requires more context, you can attach payloads + // to your status. See `absl::Status::SetPayload()` and + // `absl::Status::GetPayload()` below. + enum class StatusCode : int + { + // StatusCode::kOk + // + // kOK (gRPC code "OK") does not indicate an error; this value is returned on + // success. It is typical to check for this value before proceeding on any + // given call across an API or RPC boundary. To check this value, use the + // `absl::Status::ok()` member function rather than inspecting the raw code. + kOk = 0, + + // StatusCode::kCancelled + // + // kCancelled (gRPC code "CANCELLED") indicates the operation was cancelled, + // typically by the caller. + kCancelled = 1, + + // StatusCode::kUnknown + // + // kUnknown (gRPC code "UNKNOWN") indicates an unknown error occurred. In + // general, more specific errors should be raised, if possible. Errors raised + // by APIs that do not return enough error information may be converted to + // this error. + kUnknown = 2, + + // StatusCode::kInvalidArgument + // + // kInvalidArgument (gRPC code "INVALID_ARGUMENT") indicates the caller + // specified an invalid argument, such as a malformed filename. 
Note that use + // of such errors should be narrowly limited to indicate the invalid nature of + // the arguments themselves. Errors with validly formed arguments that may + // cause errors with the state of the receiving system should be denoted with + // `kFailedPrecondition` instead. + kInvalidArgument = 3, + + // StatusCode::kDeadlineExceeded + // + // kDeadlineExceeded (gRPC code "DEADLINE_EXCEEDED") indicates a deadline + // expired before the operation could complete. For operations that may change + // state within a system, this error may be returned even if the operation has + // completed successfully. For example, a successful response from a server + // could have been delayed long enough for the deadline to expire. + kDeadlineExceeded = 4, + + // StatusCode::kNotFound + // + // kNotFound (gRPC code "NOT_FOUND") indicates some requested entity (such as + // a file or directory) was not found. + // + // `kNotFound` is useful if a request should be denied for an entire class of + // users, such as during a gradual feature rollout or undocumented allow list. + // If a request should be denied for specific sets of users, such as through + // user-based access control, use `kPermissionDenied` instead. + kNotFound = 5, + + // StatusCode::kAlreadyExists + // + // kAlreadyExists (gRPC code "ALREADY_EXISTS") indicates that the entity a + // caller attempted to create (such as a file or directory) is already + // present. + kAlreadyExists = 6, + + // StatusCode::kPermissionDenied + // + // kPermissionDenied (gRPC code "PERMISSION_DENIED") indicates that the caller + // does not have permission to execute the specified operation. Note that this + // error is different than an error due to an *un*authenticated user. This + // error code does not imply the request is valid or the requested entity + // exists or satisfies any other pre-conditions. + // + // `kPermissionDenied` must not be used for rejections caused by exhausting + // some resource. 
Instead, use `kResourceExhausted` for those errors. + // `kPermissionDenied` must not be used if the caller cannot be identified. + // Instead, use `kUnauthenticated` for those errors. + kPermissionDenied = 7, + + // StatusCode::kResourceExhausted + // + // kResourceExhausted (gRPC code "RESOURCE_EXHAUSTED") indicates some resource + // has been exhausted, perhaps a per-user quota, or perhaps the entire file + // system is out of space. + kResourceExhausted = 8, + + // StatusCode::kFailedPrecondition + // + // kFailedPrecondition (gRPC code "FAILED_PRECONDITION") indicates that the + // operation was rejected because the system is not in a state required for + // the operation's execution. For example, a directory to be deleted may be + // non-empty, an "rmdir" operation is applied to a non-directory, etc. + // + // Some guidelines that may help a service implementer in deciding between + // `kFailedPrecondition`, `kAborted`, and `kUnavailable`: + // + // (a) Use `kUnavailable` if the client can retry just the failing call. + // (b) Use `kAborted` if the client should retry at a higher transaction + // level (such as when a client-specified test-and-set fails, indicating + // the client should restart a read-modify-write sequence). + // (c) Use `kFailedPrecondition` if the client should not retry until + // the system state has been explicitly fixed. For example, if a "rmdir" + // fails because the directory is non-empty, `kFailedPrecondition` + // should be returned since the client should not retry unless + // the files are deleted from the directory. + kFailedPrecondition = 9, + + // StatusCode::kAborted + // + // kAborted (gRPC code "ABORTED") indicates the operation was aborted, + // typically due to a concurrency issue such as a sequencer check failure or a + // failed transaction. + // + // See the guidelines above for deciding between `kFailedPrecondition`, + // `kAborted`, and `kUnavailable`. 
+ kAborted = 10, + + // StatusCode::kOutOfRange + // + // kOutOfRange (gRPC code "OUT_OF_RANGE") indicates the operation was + // attempted past the valid range, such as seeking or reading past an + // end-of-file. + // + // Unlike `kInvalidArgument`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `kInvalidArgument` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `kOutOfRange` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `kFailedPrecondition` and + // `kOutOfRange`. We recommend using `kOutOfRange` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `kOutOfRange` error to detect when + // they are done. + kOutOfRange = 11, + + // StatusCode::kUnimplemented + // + // kUnimplemented (gRPC code "UNIMPLEMENTED") indicates the operation is not + // implemented or supported in this service. In this case, the operation + // should not be re-attempted. + kUnimplemented = 12, + + // StatusCode::kInternal + // + // kInternal (gRPC code "INTERNAL") indicates an internal error has occurred + // and some invariants expected by the underlying system have not been + // satisfied. This error code is reserved for serious errors. + kInternal = 13, + + // StatusCode::kUnavailable + // + // kUnavailable (gRPC code "UNAVAILABLE") indicates the service is currently + // unavailable and that this is most likely a transient condition. An error + // such as this can be corrected by retrying with a backoff scheme. Note that + // it is not always safe to retry non-idempotent operations. + // + // See the guidelines above for deciding between `kFailedPrecondition`, + // `kAborted`, and `kUnavailable`. 
+ kUnavailable = 14, + + // StatusCode::kDataLoss + // + // kDataLoss (gRPC code "DATA_LOSS") indicates that unrecoverable data loss or + // corruption has occurred. As this error is serious, proper alerting should + // be attached to errors such as this. + kDataLoss = 15, + + // StatusCode::kUnauthenticated + // + // kUnauthenticated (gRPC code "UNAUTHENTICATED") indicates that the request + // does not have valid authentication credentials for the operation. Correct + // the authentication and try again. + kUnauthenticated = 16, + + // StatusCode::DoNotUseReservedForFutureExpansionUseDefaultInSwitchInstead_ + // + // NOTE: this error code entry should not be used and you should not rely on + // its value, which may change. + // + // The purpose of this enumerated value is to force people who handle status + // codes with `switch()` statements to *not* simply enumerate all possible + // values, but instead provide a "default:" case. Providing such a default + // case ensures that code will compile when new codes are added. + kDoNotUseReservedForFutureExpansionUseDefaultInSwitchInstead_ = 20 + }; + + // StatusCodeToString() + // + // Returns the name for the status code, or "" if it is an unknown value. + std::string StatusCodeToString(StatusCode code); + + // operator<< + // + // Streams StatusCodeToString(code) to `os`. + std::ostream& operator<<(std::ostream& os, StatusCode code); + + // absl::StatusToStringMode + // + // An `absl::StatusToStringMode` is an enumerated type indicating how + // `absl::Status::ToString()` should construct the output string for a non-ok + // status. + enum class StatusToStringMode : int + { + // ToString will not contain any extra data (such as payloads). It will only + // contain the error code and message, if any. + kWithNoExtraData = 0, + // ToString will contain the payloads. + kWithPayload = 1 << 0, + // ToString will include all the extra data this Status has. 
+ kWithEverything = ~kWithNoExtraData, + // Default mode used by ToString. Its exact value might change in the future. + kDefault = kWithPayload, + }; + + // absl::StatusToStringMode is specified as a bitmask type, which means the + // following operations must be provided: + inline constexpr StatusToStringMode operator&(StatusToStringMode lhs, StatusToStringMode rhs) + { + return static_cast(static_cast(lhs) & static_cast(rhs)); + } + inline constexpr StatusToStringMode operator|(StatusToStringMode lhs, StatusToStringMode rhs) + { + return static_cast(static_cast(lhs) | static_cast(rhs)); + } + inline constexpr StatusToStringMode operator^(StatusToStringMode lhs, StatusToStringMode rhs) + { + return static_cast(static_cast(lhs) ^ static_cast(rhs)); + } + inline constexpr StatusToStringMode operator~(StatusToStringMode arg) + { + return static_cast(~static_cast(arg)); + } + inline StatusToStringMode& operator&=(StatusToStringMode& lhs, StatusToStringMode rhs) + { + lhs = lhs & rhs; + return lhs; + } + inline StatusToStringMode& operator|=(StatusToStringMode& lhs, StatusToStringMode rhs) + { + lhs = lhs | rhs; + return lhs; + } + inline StatusToStringMode& operator^=(StatusToStringMode& lhs, StatusToStringMode rhs) + { + lhs = lhs ^ rhs; + return lhs; + } + + // absl::Status + // + // The `absl::Status` class is generally used to gracefully handle errors + // across API boundaries (and in particular across RPC boundaries). Some of + // these errors may be recoverable, but others may not. Most + // functions which can produce a recoverable error should be designed to return + // either an `absl::Status` (or the similar `absl::StatusOr`, which holds + // either an object of type `T` or an error). + // + // API developers should construct their functions to return `absl::OkStatus()` + // upon success, or an `absl::StatusCode` upon another type of error (e.g + // an `absl::StatusCode::kInvalidArgument` error). 
The API provides convenience + // functions to construct each status code. + // + // Example: + // + // absl::Status myFunction(absl::string_view fname, ...) { + // ... + // // encounter error + // if (error condition) { + // // Construct an absl::StatusCode::kInvalidArgument error + // return absl::InvalidArgumentError("bad mode"); + // } + // // else, return OK + // return absl::OkStatus(); + // } + // + // Users handling status error codes should prefer checking for an OK status + // using the `ok()` member function. Handling multiple error codes may justify + // use of switch statement, but only check for error codes you know how to + // handle; do not try to exhaustively match against all canonical error codes. + // Errors that cannot be handled should be logged and/or propagated for higher + // levels to deal with. If you do use a switch statement, make sure that you + // also provide a `default:` switch case, so that code does not break as other + // canonical codes are added to the API. + // + // Example: + // + // absl::Status result = DoSomething(); + // if (!result.ok()) { + // LOG(ERROR) << result; + // } + // + // // Provide a default if switching on multiple error codes + // switch (result.code()) { + // // The user hasn't authenticated. Ask them to reauth + // case absl::StatusCode::kUnauthenticated: + // DoReAuth(); + // break; + // // The user does not have permission. Log an error. + // case absl::StatusCode::kPermissionDenied: + // LOG(ERROR) << result; + // break; + // // Propagate the error otherwise. + // default: + // return true; + // } + // + // An `absl::Status` can optionally include a payload with more information + // about the error. Typically, this payload serves one of several purposes: + // + // * It may provide more fine-grained semantic information about the error to + // facilitate actionable remedies. + // * It may provide human-readable contextual information that is more + // appropriate to display to an end user. 
+ // + // Example: + // + // absl::Status result = DoSomething(); + // // Inform user to retry after 30 seconds + // // See more error details in googleapis/google/rpc/error_details.proto + // if (absl::IsResourceExhausted(result)) { + // google::rpc::RetryInfo info; + // info.retry_delay().seconds() = 30; + // // Payloads require a unique key (a URL to ensure no collisions with + // // other payloads), and an `absl::Cord` to hold the encoded data. + // absl::string_view url = "type.googleapis.com/google.rpc.RetryInfo"; + // result.SetPayload(url, info.SerializeAsCord()); + // return result; + // } + // + // For documentation see https://abseil.io/docs/cpp/guides/status. + // + // Returned Status objects may not be ignored. status_internal.h has a forward + // declaration of the form + // class ABSL_MUST_USE_RESULT Status; + class Status final + { + public: + // Constructors + + // This default constructor creates an OK status with no message or payload. + // Avoid this constructor and prefer explicit construction of an OK status + // with `absl::OkStatus()`. + Status(); + + // Creates a status in the canonical error space with the specified + // `absl::StatusCode` and error message. If `code == absl::StatusCode::kOk`, // NOLINT + // `msg` is ignored and an object identical to an OK status is constructed. + // + // The `msg` string must be in UTF-8. The implementation may complain (e.g., // NOLINT + // by printing a warning) if it is not. + Status(absl::StatusCode code, absl::string_view msg); + + Status(const Status&); + Status& operator=(const Status& x); + + // Move operators + + // The moved-from state is valid but unspecified. + Status(Status&&) noexcept; + Status& operator=(Status&&); + + ~Status(); + + // Status::Update() + // + // Updates the existing status with `new_status` provided that `this->ok()`. + // If the existing status already contains a non-OK error, this update has no + // effect and preserves the current data. 
Note that this behavior may change + // in the future to augment a current non-ok status with additional + // information about `new_status`. + // + // `Update()` provides a convenient way of keeping track of the first error + // encountered. + // + // Example: + // // Instead of "if (overall_status.ok()) overall_status = new_status" + // overall_status.Update(new_status); + // + void Update(const Status& new_status); + void Update(Status&& new_status); + + // Status::ok() + // + // Returns `true` if `this->code()` == `absl::StatusCode::kOk`, + // indicating the absence of an error. + // Prefer checking for an OK status using this member function. + ABSL_MUST_USE_RESULT bool ok() const; + + // Status::code() + // + // Returns the canonical error code of type `absl::StatusCode` of this status. + absl::StatusCode code() const; + + // Status::raw_code() + // + // Returns a raw (canonical) error code corresponding to the enum value of + // `google.rpc.Code` definitions within + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto. + // These values could be out of the range of canonical `absl::StatusCode` + // enum values. + // + // NOTE: This function should only be called when converting to an associated + // wire format. Use `Status::code()` for error handling. + int raw_code() const; + + // Status::message() + // + // Returns the error message associated with this error code, if available. + // Note that this message rarely describes the error code. It is not unusual + // for the error message to be the empty string. As a result, prefer + // `operator<<` or `Status::ToString()` for debug logging. + absl::string_view message() const; + + friend bool operator==(const Status&, const Status&); + friend bool operator!=(const Status&, const Status&); + + // Status::ToString() + // + // Returns a string based on the `mode`. By default, it returns combination of + // the error code name, the message and any associated payload messages. 
This + // string is designed simply to be human readable and its exact format should + // not be load bearing. Do not depend on the exact format of the result of + // `ToString()` which is subject to change. + // + // The printed code name and the message are generally substrings of the + // result, and the payloads to be printed use the status payload printer + // mechanism (which is internal). + std::string ToString( + StatusToStringMode mode = StatusToStringMode::kDefault + ) const; + + // Status::IgnoreError() + // + // Ignores any errors. This method does nothing except potentially suppress + // complaints from any tools that are checking that errors are not dropped on + // the floor. + void IgnoreError() const; + + // swap() + // + // Swap the contents of one status with another. + friend void swap(Status& a, Status& b); + + //---------------------------------------------------------------------------- + // Payload Management APIs + //---------------------------------------------------------------------------- + + // A payload may be attached to a status to provide additional context to an + // error that may not be satisfied by an existing `absl::StatusCode`. + // Typically, this payload serves one of several purposes: + // + // * It may provide more fine-grained semantic information about the error + // to facilitate actionable remedies. + // * It may provide human-readable contextual information that is more + // appropriate to display to an end user. + // + // A payload consists of a [key,value] pair, where the key is a string + // referring to a unique "type URL" and the value is an object of type + // `absl::Cord` to hold the contextual data. + // + // The "type URL" should be unique and follow the format of a URL + // (https://en.wikipedia.org/wiki/URL) and, ideally, provide some + // documentation or schema on how to interpret its associated data. 
For + // example, the default type URL for a protobuf message type is + // "type.googleapis.com/packagename.messagename". Other custom wire formats + // should define the format of type URL in a similar practice so as to + // minimize the chance of conflict between type URLs. + // Users should ensure that the type URL can be mapped to a concrete + // C++ type if they want to deserialize the payload and read it effectively. + // + // To attach a payload to a status object, call `Status::SetPayload()`, + // passing it the type URL and an `absl::Cord` of associated data. Similarly, + // to extract the payload from a status, call `Status::GetPayload()`. You + // may attach multiple payloads (with differing type URLs) to any given + // status object, provided that the status is currently exhibiting an error + // code (i.e. is not OK). + + // Status::GetPayload() + // + // Gets the payload of a status given its unique `type_url` key, if present. + absl::optional GetPayload(absl::string_view type_url) const; + + // Status::SetPayload() + // + // Sets the payload for a non-ok status using a `type_url` key, overwriting + // any existing payload for that `type_url`. + // + // NOTE: This function does nothing if the Status is ok. + void SetPayload(absl::string_view type_url, absl::Cord payload); + + // Status::ErasePayload() + // + // Erases the payload corresponding to the `type_url` key. Returns `true` if + // the payload was present. + bool ErasePayload(absl::string_view type_url); + + // Status::ForEachPayload() + // + // Iterates over the stored payloads and calls the + // `visitor(type_key, payload)` callable for each one. + // + // NOTE: The order of calls to `visitor()` is not specified and may change at + // any time. + // + // NOTE: Any mutation on the same 'absl::Status' object during visitation is + // forbidden and could result in undefined behavior. 
+ void ForEachPayload( + absl::FunctionRef visitor + ) + const; + + private: + friend Status CancelledError(); + + // Creates a status in the canonical error space with the specified + // code, and an empty error message. + explicit Status(absl::StatusCode code); + + static void UnrefNonInlined(uintptr_t rep); + static void Ref(uintptr_t rep); + static void Unref(uintptr_t rep); + + // REQUIRES: !ok() + // Ensures rep_ is not shared with any other Status. + void PrepareToModify(); + + const status_internal::Payloads* GetPayloads() const; + status_internal::Payloads* GetPayloads(); + + static bool EqualsSlow(const absl::Status& a, const absl::Status& b); + + // MSVC 14.0 limitation requires the const. + static constexpr const char kMovedFromString[] = + "Status accessed after move."; + + static const std::string* EmptyString(); + static const std::string* MovedFromString(); + + // Returns whether rep contains an inlined representation. + // See rep_ for details. + static bool IsInlined(uintptr_t rep); + + // Indicates whether this Status was the rhs of a move operation. See rep_ + // for details. + static bool IsMovedFrom(uintptr_t rep); + static uintptr_t MovedFromRep(); + + // Convert between error::Code and the inlined uintptr_t representation used + // by rep_. See rep_ for details. + static uintptr_t CodeToInlinedRep(absl::StatusCode code); + static absl::StatusCode InlinedRepToCode(uintptr_t rep); + + // Converts between StatusRep* and the external uintptr_t representation used + // by rep_. See rep_ for details. + static uintptr_t PointerToRep(status_internal::StatusRep* r); + static status_internal::StatusRep* RepToPointer(uintptr_t r); + + std::string ToStringSlow(StatusToStringMode mode) const; + + // Status supports two different representations. + // - When the low bit is off it is an inlined representation. + // It uses the canonical error space, no message or payload. + // The error code is (rep_ >> 2). 
+ // The (rep_ & 2) bit is the "moved from" indicator, used in IsMovedFrom(). + // - When the low bit is on it is an external representation. + // In this case all the data comes from a heap allocated Rep object. + // (rep_ - 1) is a status_internal::StatusRep* pointer to that structure. + uintptr_t rep_; + }; + + // OkStatus() + // + // Returns an OK status, equivalent to a default constructed instance. Prefer + // usage of `absl::OkStatus()` when constructing such an OK status. + Status OkStatus(); + + // operator<<() + // + // Prints a human-readable representation of `x` to `os`. + std::ostream& operator<<(std::ostream& os, const Status& x); + + // IsAborted() + // IsAlreadyExists() + // IsCancelled() + // IsDataLoss() + // IsDeadlineExceeded() + // IsFailedPrecondition() + // IsInternal() + // IsInvalidArgument() + // IsNotFound() + // IsOutOfRange() + // IsPermissionDenied() + // IsResourceExhausted() + // IsUnauthenticated() + // IsUnavailable() + // IsUnimplemented() + // IsUnknown() + // + // These convenience functions return `true` if a given status matches the + // `absl::StatusCode` error code of its associated function. 
+ ABSL_MUST_USE_RESULT bool IsAborted(const Status& status); + ABSL_MUST_USE_RESULT bool IsAlreadyExists(const Status& status); + ABSL_MUST_USE_RESULT bool IsCancelled(const Status& status); + ABSL_MUST_USE_RESULT bool IsDataLoss(const Status& status); + ABSL_MUST_USE_RESULT bool IsDeadlineExceeded(const Status& status); + ABSL_MUST_USE_RESULT bool IsFailedPrecondition(const Status& status); + ABSL_MUST_USE_RESULT bool IsInternal(const Status& status); + ABSL_MUST_USE_RESULT bool IsInvalidArgument(const Status& status); + ABSL_MUST_USE_RESULT bool IsNotFound(const Status& status); + ABSL_MUST_USE_RESULT bool IsOutOfRange(const Status& status); + ABSL_MUST_USE_RESULT bool IsPermissionDenied(const Status& status); + ABSL_MUST_USE_RESULT bool IsResourceExhausted(const Status& status); + ABSL_MUST_USE_RESULT bool IsUnauthenticated(const Status& status); + ABSL_MUST_USE_RESULT bool IsUnavailable(const Status& status); + ABSL_MUST_USE_RESULT bool IsUnimplemented(const Status& status); + ABSL_MUST_USE_RESULT bool IsUnknown(const Status& status); + + // AbortedError() + // AlreadyExistsError() + // CancelledError() + // DataLossError() + // DeadlineExceededError() + // FailedPreconditionError() + // InternalError() + // InvalidArgumentError() + // NotFoundError() + // OutOfRangeError() + // PermissionDeniedError() + // ResourceExhaustedError() + // UnauthenticatedError() + // UnavailableError() + // UnimplementedError() + // UnknownError() + // + // These convenience functions create an `absl::Status` object with an error + // code as indicated by the associated function name, using the error message + // passed in `message`. 
+ Status AbortedError(absl::string_view message); + Status AlreadyExistsError(absl::string_view message); + Status CancelledError(absl::string_view message); + Status DataLossError(absl::string_view message); + Status DeadlineExceededError(absl::string_view message); + Status FailedPreconditionError(absl::string_view message); + Status InternalError(absl::string_view message); + Status InvalidArgumentError(absl::string_view message); + Status NotFoundError(absl::string_view message); + Status OutOfRangeError(absl::string_view message); + Status PermissionDeniedError(absl::string_view message); + Status ResourceExhaustedError(absl::string_view message); + Status UnauthenticatedError(absl::string_view message); + Status UnavailableError(absl::string_view message); + Status UnimplementedError(absl::string_view message); + Status UnknownError(absl::string_view message); + + // ErrnoToStatusCode() + // + // Returns the StatusCode for `error_number`, which should be an `errno` value. + // See https://en.cppreference.com/w/cpp/error/errno_macros and similar + // references. + absl::StatusCode ErrnoToStatusCode(int error_number); + + // ErrnoToStatus() + // + // Convenience function that creates a `absl::Status` using an `error_number`, + // which should be an `errno` value. 
+ Status ErrnoToStatus(int error_number, absl::string_view message); + + //------------------------------------------------------------------------------ + // Implementation details follow + //------------------------------------------------------------------------------ + + inline Status::Status() : + rep_(CodeToInlinedRep(absl::StatusCode::kOk)) + { + } + + inline Status::Status(absl::StatusCode code) : + rep_(CodeToInlinedRep(code)) + { + } + + inline Status::Status(const Status& x) : + rep_(x.rep_) + { + Ref(rep_); + } + + inline Status& Status::operator=(const Status& x) + { + uintptr_t old_rep = rep_; + if (x.rep_ != old_rep) + { + Ref(x.rep_); + rep_ = x.rep_; + Unref(old_rep); + } + return *this; + } + + inline Status::Status(Status&& x) noexcept : + rep_(x.rep_) + { + x.rep_ = MovedFromRep(); + } + + inline Status& Status::operator=(Status&& x) + { + uintptr_t old_rep = rep_; + if (x.rep_ != old_rep) + { + rep_ = x.rep_; + x.rep_ = MovedFromRep(); + Unref(old_rep); + } + return *this; + } + + inline void Status::Update(const Status& new_status) + { + if (ok()) + { + *this = new_status; + } + } + + inline void Status::Update(Status&& new_status) + { + if (ok()) + { + *this = std::move(new_status); + } + } + + inline Status::~Status() + { + Unref(rep_); + } + + inline bool Status::ok() const + { + return rep_ == CodeToInlinedRep(absl::StatusCode::kOk); + } + + inline absl::string_view Status::message() const + { + return !IsInlined(rep_) ? RepToPointer(rep_)->message : (IsMovedFrom(rep_) ? absl::string_view(kMovedFromString) : absl::string_view()); + } + + inline bool operator==(const Status& lhs, const Status& rhs) + { + return lhs.rep_ == rhs.rep_ || Status::EqualsSlow(lhs, rhs); + } + + inline bool operator!=(const Status& lhs, const Status& rhs) + { + return !(lhs == rhs); + } + + inline std::string Status::ToString(StatusToStringMode mode) const + { + return ok() ? 
"OK" : ToStringSlow(mode); + } + + inline void Status::IgnoreError() const + { + // no-op + } + + inline void swap(absl::Status& a, absl::Status& b) + { + using std::swap; + swap(a.rep_, b.rep_); + } + + inline const status_internal::Payloads* Status::GetPayloads() const + { + return IsInlined(rep_) ? nullptr : RepToPointer(rep_)->payloads.get(); + } + + inline status_internal::Payloads* Status::GetPayloads() + { + return IsInlined(rep_) ? nullptr : RepToPointer(rep_)->payloads.get(); + } + + inline bool Status::IsInlined(uintptr_t rep) + { + return (rep & 1) == 0; + } + + inline bool Status::IsMovedFrom(uintptr_t rep) + { + return IsInlined(rep) && (rep & 2) != 0; + } + + inline uintptr_t Status::MovedFromRep() + { + return CodeToInlinedRep(absl::StatusCode::kInternal) | 2; + } + + inline uintptr_t Status::CodeToInlinedRep(absl::StatusCode code) + { + return static_cast(code) << 2; + } + + inline absl::StatusCode Status::InlinedRepToCode(uintptr_t rep) + { + assert(IsInlined(rep)); + return static_cast(rep >> 2); + } + + inline status_internal::StatusRep* Status::RepToPointer(uintptr_t rep) + { + assert(!IsInlined(rep)); + return reinterpret_cast(rep - 1); + } + + inline uintptr_t Status::PointerToRep(status_internal::StatusRep* rep) + { + return reinterpret_cast(rep) + 1; + } + + inline void Status::Ref(uintptr_t rep) + { + if (!IsInlined(rep)) + { + RepToPointer(rep)->ref.fetch_add(1, std::memory_order_relaxed); + } + } + + inline void Status::Unref(uintptr_t rep) + { + if (!IsInlined(rep)) + { + UnrefNonInlined(rep); + } + } + + inline Status OkStatus() + { + return Status(); + } + + // Creates a `Status` object with the `absl::StatusCode::kCancelled` error code + // and an empty message. It is provided only for efficiency, given that + // message-less kCancelled errors are common in the infrastructure. + inline Status CancelledError() + { + return Status(absl::StatusCode::kCancelled); + } + + // Retrieves a message's status as a null terminated C string. 
The lifetime of + // this string is tied to the lifetime of the status object itself. + // + // If the status's message is empty, the empty string is returned. + // + // StatusMessageAsCStr exists for C support. Use `status.message()` in C++. + const char* StatusMessageAsCStr( + const Status& status ABSL_ATTRIBUTE_LIFETIME_BOUND + ); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STATUS_STATUS_H_ diff --git a/CAPI/cpp/grpc/include/absl/status/status_payload_printer.h b/CAPI/cpp/grpc/include/absl/status/status_payload_printer.h new file mode 100644 index 00000000..2187ee06 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/status/status_payload_printer.h @@ -0,0 +1,52 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#ifndef ABSL_STATUS_STATUS_PAYLOAD_PRINTER_H_ +#define ABSL_STATUS_STATUS_PAYLOAD_PRINTER_H_ + +#include + +#include "absl/strings/cord.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace status_internal + { + + // By default, `Status::ToString` and `operator<<(Status)` print a payload by + // dumping the type URL and the raw bytes. To help debugging, we provide an + // extension point, which is a global printer function that can be set by users + // to specify how to print payloads. 
The function takes the type URL and the + // payload as input, and should return a valid human-readable string on success + // or `absl::nullopt` on failure (in which case it falls back to the default + // approach of printing the raw bytes). + // NOTE: This is an internal API and the design is subject to change in the + // future in a non-backward-compatible way. Since it's only meant for debugging + // purpose, you should not rely on it in any critical logic. + using StatusPayloadPrinter = absl::optional (*)(absl::string_view, const absl::Cord&); + + // Sets the global payload printer. Only one printer should be set per process. + // If multiple printers are set, it's undefined which one will be used. + void SetStatusPayloadPrinter(StatusPayloadPrinter); + + // Returns the global payload printer if previously set, otherwise `nullptr`. + StatusPayloadPrinter GetStatusPayloadPrinter(); + + } // namespace status_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STATUS_STATUS_PAYLOAD_PRINTER_H_ diff --git a/CAPI/cpp/grpc/include/absl/status/statusor.h b/CAPI/cpp/grpc/include/absl/status/statusor.h new file mode 100644 index 00000000..b20a25af --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/status/statusor.h @@ -0,0 +1,879 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: statusor.h +// ----------------------------------------------------------------------------- +// +// An `absl::StatusOr` represents a union of an `absl::Status` object +// and an object of type `T`. The `absl::StatusOr` will either contain an +// object of type `T` (indicating a successful operation), or an error (of type +// `absl::Status`) explaining why such a value is not present. +// +// In general, check the success of an operation returning an +// `absl::StatusOr` like you would an `absl::Status` by using the `ok()` +// member function. +// +// Example: +// +// StatusOr result = Calculation(); +// if (result.ok()) { +// result->DoSomethingCool(); +// } else { +// LOG(ERROR) << result.status(); +// } +#ifndef ABSL_STATUS_STATUSOR_H_ +#define ABSL_STATUS_STATUSOR_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/call_once.h" +#include "absl/meta/type_traits.h" +#include "absl/status/internal/statusor_internal.h" +#include "absl/status/status.h" +#include "absl/types/variant.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // BadStatusOrAccess + // + // This class defines the type of object to throw (if exceptions are enabled), + // when accessing the value of an `absl::StatusOr` object that does not + // contain a value. This behavior is analogous to that of + // `std::bad_optional_access` in the case of accessing an invalid + // `std::optional` value. 
+ // + // Example: + // + // try { + // absl::StatusOr v = FetchInt(); + // DoWork(v.value()); // Accessing value() when not "OK" may throw + // } catch (absl::BadStatusOrAccess& ex) { + // LOG(ERROR) << ex.status(); + // } + class BadStatusOrAccess : public std::exception + { + public: + explicit BadStatusOrAccess(absl::Status status); + ~BadStatusOrAccess() override = default; + + BadStatusOrAccess(const BadStatusOrAccess& other); + BadStatusOrAccess& operator=(const BadStatusOrAccess& other); + BadStatusOrAccess(BadStatusOrAccess&& other); + BadStatusOrAccess& operator=(BadStatusOrAccess&& other); + + // BadStatusOrAccess::what() + // + // Returns the associated explanatory string of the `absl::StatusOr` + // object's error code. This function contains information about the failing + // status, but its exact formatting may change and should not be depended on. + // + // The pointer of this string is guaranteed to be valid until any non-const + // function is invoked on the exception object. + const char* what() const noexcept override; + + // BadStatusOrAccess::status() + // + // Returns the associated `absl::Status` of the `absl::StatusOr` object's + // error. + const absl::Status& status() const; + + private: + void InitWhat() const; + + absl::Status status_; + mutable absl::once_flag init_what_; + mutable std::string what_; + }; + + // Returned StatusOr objects may not be ignored. + template +#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) + // TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict + // [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available. + class [[nodiscard]] StatusOr; +#else + class ABSL_MUST_USE_RESULT StatusOr; +#endif // ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) + + // absl::StatusOr + // + // The `absl::StatusOr` class template is a union of an `absl::Status` object + // and an object of type `T`. 
The `absl::StatusOr` models an object that is + // either a usable object, or an error (of type `absl::Status`) explaining why + // such an object is not present. An `absl::StatusOr` is typically the return + // value of a function which may fail. + // + // An `absl::StatusOr` can never hold an "OK" status (an + // `absl::StatusCode::kOk` value); instead, the presence of an object of type + // `T` indicates success. Instead of checking for a `kOk` value, use the + // `absl::StatusOr::ok()` member function. (It is for this reason, and code + // readability, that using the `ok()` function is preferred for `absl::Status` + // as well.) + // + // Example: + // + // StatusOr result = DoBigCalculationThatCouldFail(); + // if (result.ok()) { + // result->DoSomethingCool(); + // } else { + // LOG(ERROR) << result.status(); + // } + // + // Accessing the object held by an `absl::StatusOr` should be performed via + // `operator*` or `operator->`, after a call to `ok()` confirms that the + // `absl::StatusOr` holds an object of type `T`: + // + // Example: + // + // absl::StatusOr i = GetCount(); + // if (i.ok()) { + // updated_total += *i; + // } + // + // NOTE: using `absl::StatusOr::value()` when no valid value is present will + // throw an exception if exceptions are enabled or terminate the process when + // exceptions are not enabled. + // + // Example: + // + // StatusOr result = DoBigCalculationThatCouldFail(); + // const Foo& foo = result.value(); // Crash/exception if no value present + // foo.DoSomethingCool(); + // + // A `absl::StatusOr` can be constructed from a null pointer like any other + // pointer value, and the result will be that `ok()` returns `true` and + // `value()` returns `nullptr`. 
Checking the value of pointer in an + // `absl::StatusOr` generally requires a bit more care, to ensure both that + // a value is present and that value is not null: + // + // StatusOr> result = FooFactory::MakeNewFoo(arg); + // if (!result.ok()) { + // LOG(ERROR) << result.status(); + // } else if (*result == nullptr) { + // LOG(ERROR) << "Unexpected null pointer"; + // } else { + // (*result)->DoSomethingCool(); + // } + // + // Example factory implementation returning StatusOr: + // + // StatusOr FooFactory::MakeFoo(int arg) { + // if (arg <= 0) { + // return absl::Status(absl::StatusCode::kInvalidArgument, + // "Arg must be positive"); + // } + // return Foo(arg); + // } + template + class StatusOr : private internal_statusor::StatusOrData, private internal_statusor::CopyCtorBase, private internal_statusor::MoveCtorBase, private internal_statusor::CopyAssignBase, private internal_statusor::MoveAssignBase + { + template + friend class StatusOr; + + typedef internal_statusor::StatusOrData Base; + + public: + // StatusOr::value_type + // + // This instance data provides a generic `value_type` member for use within + // generic programming. This usage is analogous to that of + // `optional::value_type` in the case of `std::optional`. + typedef T value_type; + + // Constructors + + // Constructs a new `absl::StatusOr` with an `absl::StatusCode::kUnknown` + // status. This constructor is marked 'explicit' to prevent usages in return + // values such as 'return {};', under the misconception that + // `absl::StatusOr>` will be initialized with an empty + // vector, instead of an `absl::StatusCode::kUnknown` error code. + explicit StatusOr(); + + // `StatusOr` is copy constructible if `T` is copy constructible. + StatusOr(const StatusOr&) = default; + // `StatusOr` is copy assignable if `T` is copy constructible and copy + // assignable. + StatusOr& operator=(const StatusOr&) = default; + + // `StatusOr` is move constructible if `T` is move constructible. 
+ StatusOr(StatusOr&&) = default; + // `StatusOr` is moveAssignable if `T` is move constructible and move + // assignable. + StatusOr& operator=(StatusOr&&) = default; + + // Converting Constructors + + // Constructs a new `absl::StatusOr` from an `absl::StatusOr`, when `T` + // is constructible from `U`. To avoid ambiguity, these constructors are + // disabled if `T` is also constructible from `StatusOr.`. This constructor + // is explicit if and only if the corresponding construction of `T` from `U` + // is explicit. (This constructor inherits its explicitness from the + // underlying constructor.) + template< + typename U, + absl::enable_if_t< + absl::conjunction< + absl::negation>, + std::is_constructible, + std::is_convertible, + absl::negation< + internal_statusor::IsConstructibleOrConvertibleFromStatusOr< + T, + U>>>::value, + int> = 0> + StatusOr(const StatusOr& other) // NOLINT + : + Base(static_cast::Base&>(other)) + { + } + template< + typename U, + absl::enable_if_t< + absl::conjunction< + absl::negation>, + std::is_constructible, + absl::negation>, + absl::negation< + internal_statusor::IsConstructibleOrConvertibleFromStatusOr< + T, + U>>>::value, + int> = 0> + explicit StatusOr(const StatusOr& other) : + Base(static_cast::Base&>(other)) + { + } + + template< + typename U, + absl::enable_if_t< + absl::conjunction< + absl::negation>, + std::is_constructible, + std::is_convertible, + absl::negation< + internal_statusor::IsConstructibleOrConvertibleFromStatusOr< + T, + U>>>::value, + int> = 0> + StatusOr(StatusOr&& other) // NOLINT + : + Base(static_cast::Base&&>(other)) + { + } + template< + typename U, + absl::enable_if_t< + absl::conjunction< + absl::negation>, + std::is_constructible, + absl::negation>, + absl::negation< + internal_statusor::IsConstructibleOrConvertibleFromStatusOr< + T, + U>>>::value, + int> = 0> + explicit StatusOr(StatusOr&& other) : + Base(static_cast::Base&&>(other)) + { + } + + // Converting Assignment Operators + + // Creates 
an `absl::StatusOr` through assignment from an + // `absl::StatusOr` when: + // + // * Both `absl::StatusOr` and `absl::StatusOr` are OK by assigning + // `U` to `T` directly. + // * `absl::StatusOr` is OK and `absl::StatusOr` contains an error + // code by destroying `absl::StatusOr`'s value and assigning from + // `absl::StatusOr' + // * `absl::StatusOr` contains an error code and `absl::StatusOr` is + // OK by directly initializing `T` from `U`. + // * Both `absl::StatusOr` and `absl::StatusOr` contain an error + // code by assigning the `Status` in `absl::StatusOr` to + // `absl::StatusOr` + // + // These overloads only apply if `absl::StatusOr` is constructible and + // assignable from `absl::StatusOr` and `StatusOr` cannot be directly + // assigned from `StatusOr`. + template< + typename U, + absl::enable_if_t< + absl::conjunction< + absl::negation>, + std::is_constructible, + std::is_assignable, + absl::negation< + internal_statusor:: + IsConstructibleOrConvertibleOrAssignableFromStatusOr< + T, + U>>>::value, + int> = 0> + StatusOr& operator=(const StatusOr& other) + { + this->Assign(other); + return *this; + } + template< + typename U, + absl::enable_if_t< + absl::conjunction< + absl::negation>, + std::is_constructible, + std::is_assignable, + absl::negation< + internal_statusor:: + IsConstructibleOrConvertibleOrAssignableFromStatusOr< + T, + U>>>::value, + int> = 0> + StatusOr& operator=(StatusOr&& other) + { + this->Assign(std::move(other)); + return *this; + } + + // Constructs a new `absl::StatusOr` with a non-ok status. After calling + // this constructor, `this->ok()` will be `false` and calls to `value()` will + // crash, or produce an exception if exceptions are enabled. + // + // The constructor also takes any type `U` that is convertible to + // `absl::Status`. This constructor is explicit if an only if `U` is not of + // type `absl::Status` and the conversion from `U` to `Status` is explicit. + // + // REQUIRES: !Status(std::forward(v)).ok(). 
This requirement is DCHECKed. + // In optimized builds, passing absl::OkStatus() here will have the effect + // of passing absl::StatusCode::kInternal as a fallback. + template< + typename U = absl::Status, + absl::enable_if_t< + absl::conjunction< + std::is_convertible, + std::is_constructible, + absl::negation, absl::StatusOr>>, + absl::negation, T>>, + absl::negation, absl::in_place_t>>, + absl::negation>>::value, + int> = 0> + StatusOr(U&& v) : + Base(std::forward(v)) + { + } + + template< + typename U = absl::Status, + absl::enable_if_t< + absl::conjunction< + absl::negation>, + std::is_constructible, + absl::negation, absl::StatusOr>>, + absl::negation, T>>, + absl::negation, absl::in_place_t>>, + absl::negation>>::value, + int> = 0> + explicit StatusOr(U&& v) : + Base(std::forward(v)) + { + } + + template< + typename U = absl::Status, + absl::enable_if_t< + absl::conjunction< + std::is_convertible, + std::is_constructible, + absl::negation, absl::StatusOr>>, + absl::negation, T>>, + absl::negation, absl::in_place_t>>, + absl::negation>>::value, + int> = 0> + StatusOr& operator=(U&& v) + { + this->AssignStatus(std::forward(v)); + return *this; + } + + // Perfect-forwarding value assignment operator. + + // If `*this` contains a `T` value before the call, the contained value is + // assigned from `std::forward(v)`; Otherwise, it is directly-initialized + // from `std::forward(v)`. + // This function does not participate in overload unless: + // 1. `std::is_constructible_v` is true, + // 2. `std::is_assignable_v` is true. + // 3. `std::is_same_v, std::remove_cvref_t>` is false. + // 4. Assigning `U` to `T` is not ambiguous: + // If `U` is `StatusOr` and `T` is constructible and assignable from + // both `StatusOr` and `V`, the assignment is considered bug-prone and + // ambiguous thus will fail to compile. 
For example: + // StatusOr s1 = true; // s1.ok() && *s1 == true + // StatusOr s2 = false; // s2.ok() && *s2 == false + // s1 = s2; // ambiguous, `s1 = *s2` or `s1 = bool(s2)`? + template< + typename U = T, + typename = typename std::enable_if, + std::is_assignable, + absl::disjunction< + std::is_same, T>, + absl::conjunction< + absl::negation>, + absl::negation>>>, + internal_statusor::IsForwardingAssignmentValid>::value>::type> + StatusOr& operator=(U&& v) + { + this->Assign(std::forward(v)); + return *this; + } + + // Constructs the inner value `T` in-place using the provided args, using the + // `T(args...)` constructor. + template + explicit StatusOr(absl::in_place_t, Args&&... args); + template + explicit StatusOr(absl::in_place_t, std::initializer_list ilist, Args&&... args); + + // Constructs the inner value `T` in-place using the provided args, using the + // `T(U)` (direct-initialization) constructor. This constructor is only valid + // if `T` can be constructed from a `U`. Can accept move or copy constructors. + // + // This constructor is explicit if `U` is not convertible to `T`. To avoid + // ambiguity, this constructor is disabled if `U` is a `StatusOr`, where + // `J` is convertible to `T`. 
+ template< + typename U = T, + absl::enable_if_t< + absl::conjunction< + internal_statusor::IsDirectInitializationValid, + std::is_constructible, + std::is_convertible, + absl::disjunction< + std::is_same, T>, + absl::conjunction< + absl::negation>, + absl::negation< + internal_statusor::HasConversionOperatorToStatusOr< + T, + U&&>>>>>::value, + int> = 0> + StatusOr(U&& u) // NOLINT + : + StatusOr(absl::in_place, std::forward(u)) + { + } + + template< + typename U = T, + absl::enable_if_t< + absl::conjunction< + internal_statusor::IsDirectInitializationValid, + absl::disjunction< + std::is_same, T>, + absl::conjunction< + absl::negation>, + absl::negation< + internal_statusor::HasConversionOperatorToStatusOr< + T, + U&&>>>>, + std::is_constructible, + absl::negation>>::value, + int> = 0> + explicit StatusOr(U&& u) // NOLINT + : + StatusOr(absl::in_place, std::forward(u)) + { + } + + // StatusOr::ok() + // + // Returns whether or not this `absl::StatusOr` holds a `T` value. This + // member function is analogous to `absl::Status::ok()` and should be used + // similarly to check the status of return values. + // + // Example: + // + // StatusOr result = DoBigCalculationThatCouldFail(); + // if (result.ok()) { + // // Handle result + // else { + // // Handle error + // } + ABSL_MUST_USE_RESULT bool ok() const + { + return this->status_.ok(); + } + + // StatusOr::status() + // + // Returns a reference to the current `absl::Status` contained within the + // `absl::StatusOr`. If `absl::StatusOr` contains a `T`, then this + // function returns `absl::OkStatus()`. + const Status& status() const&; + Status status() &&; + + // StatusOr::value() + // + // Returns a reference to the held value if `this->ok()`. Otherwise, throws + // `absl::BadStatusOrAccess` if exceptions are enabled, or is guaranteed to + // terminate the process if exceptions are disabled. 
+ // + // If you have already checked the status using `this->ok()`, you probably + // want to use `operator*()` or `operator->()` to access the value instead of + // `value`. + // + // Note: for value types that are cheap to copy, prefer simple code: + // + // T value = statusor.value(); + // + // Otherwise, if the value type is expensive to copy, but can be left + // in the StatusOr, simply assign to a reference: + // + // T& value = statusor.value(); // or `const T&` + // + // Otherwise, if the value type supports an efficient move, it can be + // used as follows: + // + // T value = std::move(statusor).value(); + // + // The `std::move` on statusor instead of on the whole expression enables + // warnings about possible uses of the statusor object after the move. + const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND; + T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND; + const T&& value() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND; + T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND; + + // StatusOr:: operator*() + // + // Returns a reference to the current value. + // + // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined. + // + // Use `this->ok()` to verify that there is a current value within the + // `absl::StatusOr`. Alternatively, see the `value()` member function for a + // similar API that guarantees crashing or throwing an exception if there is + // no current value. + const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND; + T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND; + const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND; + T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND; + + // StatusOr::operator->() + // + // Returns a pointer to the current value. + // + // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined. + // + // Use `this->ok()` to verify that there is a current value. 
+ const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND; + T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND; + + // StatusOr::value_or() + // + // Returns the current value if `this->ok() == true`. Otherwise constructs a + // value using the provided `default_value`. + // + // Unlike `value`, this function returns by value, copying the current value + // if necessary. If the value type supports an efficient move, it can be used + // as follows: + // + // T value = std::move(statusor).value_or(def); + // + // Unlike with `value`, calling `std::move()` on the result of `value_or` will + // still trigger a copy. + template + T value_or(U&& default_value) const&; + template + T value_or(U&& default_value) &&; + + // StatusOr::IgnoreError() + // + // Ignores any errors. This method does nothing except potentially suppress + // complaints from any tools that are checking that errors are not dropped on + // the floor. + void IgnoreError() const; + + // StatusOr::emplace() + // + // Reconstructs the inner value T in-place using the provided args, using the + // T(args...) constructor. Returns reference to the reconstructed `T`. + template + T& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (ok()) + { + this->Clear(); + this->MakeValue(std::forward(args)...); + } + else + { + this->MakeValue(std::forward(args)...); + this->status_ = absl::OkStatus(); + } + return this->data_; + } + + template< + typename U, + typename... Args, + absl::enable_if_t< + std::is_constructible&, Args&&...>::value, + int> = 0> + T& emplace(std::initializer_list ilist, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + if (ok()) + { + this->Clear(); + this->MakeValue(ilist, std::forward(args)...); + } + else + { + this->MakeValue(ilist, std::forward(args)...); + this->status_ = absl::OkStatus(); + } + return this->data_; + } + + // StatusOr::AssignStatus() + // + // Sets the status of `absl::StatusOr` to the given non-ok status value. 
+ // + // NOTE: We recommend using the constructor and `operator=` where possible. + // This method is intended for use in generic programming, to enable setting + // the status of a `StatusOr` when `T` may be `Status`. In that case, the + // constructor and `operator=` would assign into the inner value of type + // `Status`, rather than status of the `StatusOr` (b/280392796). + // + // REQUIRES: !Status(std::forward(v)).ok(). This requirement is DCHECKed. + // In optimized builds, passing absl::OkStatus() here will have the effect + // of passing absl::StatusCode::kInternal as a fallback. + using internal_statusor::StatusOrData::AssignStatus; + + private: + using internal_statusor::StatusOrData::Assign; + template + void Assign(const absl::StatusOr& other); + template + void Assign(absl::StatusOr&& other); + }; + + // operator==() + // + // This operator checks the equality of two `absl::StatusOr` objects. + template + bool operator==(const StatusOr& lhs, const StatusOr& rhs) + { + if (lhs.ok() && rhs.ok()) + return *lhs == *rhs; + return lhs.status() == rhs.status(); + } + + // operator!=() + // + // This operator checks the inequality of two `absl::StatusOr` objects. + template + bool operator!=(const StatusOr& lhs, const StatusOr& rhs) + { + return !(lhs == rhs); + } + + //------------------------------------------------------------------------------ + // Implementation details for StatusOr + //------------------------------------------------------------------------------ + + // TODO(sbenza): avoid the string here completely. 
+ template + StatusOr::StatusOr() : + Base(Status(absl::StatusCode::kUnknown, "")) + { + } + + template + template + inline void StatusOr::Assign(const StatusOr& other) + { + if (other.ok()) + { + this->Assign(*other); + } + else + { + this->AssignStatus(other.status()); + } + } + + template + template + inline void StatusOr::Assign(StatusOr&& other) + { + if (other.ok()) + { + this->Assign(*std::move(other)); + } + else + { + this->AssignStatus(std::move(other).status()); + } + } + template + template + StatusOr::StatusOr(absl::in_place_t, Args&&... args) : + Base(absl::in_place, std::forward(args)...) + { + } + + template + template + StatusOr::StatusOr(absl::in_place_t, std::initializer_list ilist, Args&&... args) : + Base(absl::in_place, ilist, std::forward(args)...) + { + } + + template + const Status& StatusOr::status() const& + { + return this->status_; + } + template + Status StatusOr::status() && + { + return ok() ? OkStatus() : std::move(this->status_); + } + + template + const T& StatusOr::value() const& + { + if (!this->ok()) + internal_statusor::ThrowBadStatusOrAccess(this->status_); + return this->data_; + } + + template + T& StatusOr::value() & + { + if (!this->ok()) + internal_statusor::ThrowBadStatusOrAccess(this->status_); + return this->data_; + } + + template + const T&& StatusOr::value() const&& + { + if (!this->ok()) + { + internal_statusor::ThrowBadStatusOrAccess(std::move(this->status_)); + } + return std::move(this->data_); + } + + template + T&& StatusOr::value() && + { + if (!this->ok()) + { + internal_statusor::ThrowBadStatusOrAccess(std::move(this->status_)); + } + return std::move(this->data_); + } + + template + const T& StatusOr::operator*() const& + { + this->EnsureOk(); + return this->data_; + } + + template + T& StatusOr::operator*() & + { + this->EnsureOk(); + return this->data_; + } + + template + const T&& StatusOr::operator*() const&& + { + this->EnsureOk(); + return std::move(this->data_); + } + + template + T&& 
StatusOr::operator*() && + { + this->EnsureOk(); + return std::move(this->data_); + } + + template + const T* StatusOr::operator->() const + { + this->EnsureOk(); + return &this->data_; + } + + template + T* StatusOr::operator->() + { + this->EnsureOk(); + return &this->data_; + } + + template + template + T StatusOr::value_or(U&& default_value) const& + { + if (ok()) + { + return this->data_; + } + return std::forward(default_value); + } + + template + template + T StatusOr::value_or(U&& default_value) && + { + if (ok()) + { + return std::move(this->data_); + } + return std::forward(default_value); + } + + template + void StatusOr::IgnoreError() const + { + // no-op + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STATUS_STATUSOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/ascii.h b/CAPI/cpp/grpc/include/absl/strings/ascii.h new file mode 100644 index 00000000..62edb21f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/ascii.h @@ -0,0 +1,282 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: ascii.h +// ----------------------------------------------------------------------------- +// +// This package contains functions operating on characters and strings +// restricted to standard ASCII. 
These include character classification +// functions analogous to those found in the ANSI C Standard Library +// header file. +// +// C++ implementations provide functionality based on their +// C environment locale. In general, reliance on such a locale is not ideal, as +// the locale standard is problematic (and may not return invariant information +// for the same character set, for example). These `ascii_*()` functions are +// hard-wired for standard ASCII, much faster, and guaranteed to behave +// consistently. They will never be overloaded, nor will their function +// signature change. +// +// `ascii_isalnum()`, `ascii_isalpha()`, `ascii_isascii()`, `ascii_isblank()`, +// `ascii_iscntrl()`, `ascii_isdigit()`, `ascii_isgraph()`, `ascii_islower()`, +// `ascii_isprint()`, `ascii_ispunct()`, `ascii_isspace()`, `ascii_isupper()`, +// `ascii_isxdigit()` +// Analogous to the functions with similar names, these +// functions take an unsigned char and return a bool, based on whether the +// character matches the condition specified. +// +// If the input character has a numerical value greater than 127, these +// functions return `false`. +// +// `ascii_tolower()`, `ascii_toupper()` +// Analogous to the functions with similar names, these functions +// take an unsigned char and return a char. +// +// If the input character is not an ASCII {lower,upper}-case letter (including +// numerical values greater than 127) then the functions return the same value +// as the input character. + +#ifndef ABSL_STRINGS_ASCII_H_ +#define ABSL_STRINGS_ASCII_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace ascii_internal + { + + // Declaration for an array of bitfields holding character information. + ABSL_DLL extern const unsigned char kPropertyBits[256]; + + // Declaration for the array of characters to upper-case characters. 
+ ABSL_DLL extern const char kToUpper[256]; + + // Declaration for the array of characters to lower-case characters. + ABSL_DLL extern const char kToLower[256]; + + } // namespace ascii_internal + + // ascii_isalpha() + // + // Determines whether the given character is an alphabetic character. + inline bool ascii_isalpha(unsigned char c) + { + return (ascii_internal::kPropertyBits[c] & 0x01) != 0; + } + + // ascii_isalnum() + // + // Determines whether the given character is an alphanumeric character. + inline bool ascii_isalnum(unsigned char c) + { + return (ascii_internal::kPropertyBits[c] & 0x04) != 0; + } + + // ascii_isspace() + // + // Determines whether the given character is a whitespace character (space, + // tab, vertical tab, formfeed, linefeed, or carriage return). + inline bool ascii_isspace(unsigned char c) + { + return (ascii_internal::kPropertyBits[c] & 0x08) != 0; + } + + // ascii_ispunct() + // + // Determines whether the given character is a punctuation character. + inline bool ascii_ispunct(unsigned char c) + { + return (ascii_internal::kPropertyBits[c] & 0x10) != 0; + } + + // ascii_isblank() + // + // Determines whether the given character is a blank character (tab or space). + inline bool ascii_isblank(unsigned char c) + { + return (ascii_internal::kPropertyBits[c] & 0x20) != 0; + } + + // ascii_iscntrl() + // + // Determines whether the given character is a control character. + inline bool ascii_iscntrl(unsigned char c) + { + return (ascii_internal::kPropertyBits[c] & 0x40) != 0; + } + + // ascii_isxdigit() + // + // Determines whether the given character can be represented as a hexadecimal + // digit character (i.e. {0-9} or {A-F}). + inline bool ascii_isxdigit(unsigned char c) + { + return (ascii_internal::kPropertyBits[c] & 0x80) != 0; + } + + // ascii_isdigit() + // + // Determines whether the given character can be represented as a decimal + // digit character (i.e. {0-9}). 
+ inline bool ascii_isdigit(unsigned char c) + { + return c >= '0' && c <= '9'; + } + + // ascii_isprint() + // + // Determines whether the given character is printable, including spaces. + inline bool ascii_isprint(unsigned char c) + { + return c >= 32 && c < 127; + } + + // ascii_isgraph() + // + // Determines whether the given character has a graphical representation. + inline bool ascii_isgraph(unsigned char c) + { + return c > 32 && c < 127; + } + + // ascii_isupper() + // + // Determines whether the given character is uppercase. + inline bool ascii_isupper(unsigned char c) + { + return c >= 'A' && c <= 'Z'; + } + + // ascii_islower() + // + // Determines whether the given character is lowercase. + inline bool ascii_islower(unsigned char c) + { + return c >= 'a' && c <= 'z'; + } + + // ascii_isascii() + // + // Determines whether the given character is ASCII. + inline bool ascii_isascii(unsigned char c) + { + return c < 128; + } + + // ascii_tolower() + // + // Returns an ASCII character, converting to lowercase if uppercase is + // passed. Note that character values > 127 are simply returned. + inline char ascii_tolower(unsigned char c) + { + return ascii_internal::kToLower[c]; + } + + // Converts the characters in `s` to lowercase, changing the contents of `s`. + void AsciiStrToLower(std::string* s); + + // Creates a lowercase string from a given absl::string_view. + ABSL_MUST_USE_RESULT inline std::string AsciiStrToLower(absl::string_view s) + { + std::string result(s); + absl::AsciiStrToLower(&result); + return result; + } + + // ascii_toupper() + // + // Returns the ASCII character, converting to upper-case if lower-case is + // passed. Note that characters values > 127 are simply returned. + inline char ascii_toupper(unsigned char c) + { + return ascii_internal::kToUpper[c]; + } + + // Converts the characters in `s` to uppercase, changing the contents of `s`. 
+ void AsciiStrToUpper(std::string* s); + + // Creates an uppercase string from a given absl::string_view. + ABSL_MUST_USE_RESULT inline std::string AsciiStrToUpper(absl::string_view s) + { + std::string result(s); + absl::AsciiStrToUpper(&result); + return result; + } + + // Returns absl::string_view with whitespace stripped from the beginning of the + // given string_view. + ABSL_MUST_USE_RESULT inline absl::string_view StripLeadingAsciiWhitespace( + absl::string_view str + ) + { + auto it = std::find_if_not(str.begin(), str.end(), absl::ascii_isspace); + return str.substr(static_cast(it - str.begin())); + } + + // Strips in place whitespace from the beginning of the given string. + inline void StripLeadingAsciiWhitespace(std::string* str) + { + auto it = std::find_if_not(str->begin(), str->end(), absl::ascii_isspace); + str->erase(str->begin(), it); + } + + // Returns absl::string_view with whitespace stripped from the end of the given + // string_view. + ABSL_MUST_USE_RESULT inline absl::string_view StripTrailingAsciiWhitespace( + absl::string_view str + ) + { + auto it = std::find_if_not(str.rbegin(), str.rend(), absl::ascii_isspace); + return str.substr(0, static_cast(str.rend() - it)); + } + + // Strips in place whitespace from the end of the given string + inline void StripTrailingAsciiWhitespace(std::string* str) + { + auto it = std::find_if_not(str->rbegin(), str->rend(), absl::ascii_isspace); + str->erase(static_cast(str->rend() - it)); + } + + // Returns absl::string_view with whitespace stripped from both ends of the + // given string_view. 
+ ABSL_MUST_USE_RESULT inline absl::string_view StripAsciiWhitespace( + absl::string_view str + ) + { + return StripTrailingAsciiWhitespace(StripLeadingAsciiWhitespace(str)); + } + + // Strips in place whitespace from both ends of the given string + inline void StripAsciiWhitespace(std::string* str) + { + StripTrailingAsciiWhitespace(str); + StripLeadingAsciiWhitespace(str); + } + + // Removes leading, trailing, and consecutive internal whitespace. + void RemoveExtraAsciiWhitespace(std::string*); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_ASCII_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/charconv.h b/CAPI/cpp/grpc/include/absl/strings/charconv.h new file mode 100644 index 00000000..a64789fa --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/charconv.h @@ -0,0 +1,127 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_CHARCONV_H_ +#define ABSL_STRINGS_CHARCONV_H_ + +#include // NOLINT(build/c++11) + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Workalike compatibility version of std::chars_format from C++17. + // + // This is an bitfield enumerator which can be passed to absl::from_chars to + // configure the string-to-float conversion. + enum class chars_format + { + scientific = 1, + fixed = 2, + hex = 4, + general = fixed | scientific, + }; + + // The return result of a string-to-number conversion. 
+ // + // `ec` will be set to `invalid_argument` if a well-formed number was not found + // at the start of the input range, `result_out_of_range` if a well-formed + // number was found, but it was out of the representable range of the requested + // type, or to std::errc() otherwise. + // + // If a well-formed number was found, `ptr` is set to one past the sequence of + // characters that were successfully parsed. If none was found, `ptr` is set + // to the `first` argument to from_chars. + struct from_chars_result + { + const char* ptr; + std::errc ec; + }; + + // Workalike compatibility version of std::from_chars from C++17. Currently + // this only supports the `double` and `float` types. + // + // This interface incorporates the proposed resolutions for library issues + // DR 3080 and DR 3081. If these are adopted with different wording, + // Abseil's behavior will change to match the standard. (The behavior most + // likely to change is for DR 3081, which says what `value` will be set to in + // the case of overflow and underflow. Code that wants to avoid possible + // breaking changes in this area should not depend on `value` when the returned + // from_chars_result indicates a range error.) + // + // Searches the range [first, last) for the longest matching pattern beginning + // at `first` that represents a floating point number. If one is found, store + // the result in `value`. + // + // The matching pattern format is almost the same as that of strtod(), except + // that (1) C locale is not respected, (2) an initial '+' character in the + // input range will never be matched, and (3) leading whitespaces are not + // ignored. + // + // If `fmt` is set, it must be one of the enumerator values of the chars_format. + // (This is despite the fact that chars_format is a bitmask type.) If set to + // `scientific`, a matching number must contain an exponent. If set to `fixed`, + // then an exponent will never match. 
(For example, the string "1e5" will be + // parsed as "1".) If set to `hex`, then a hexadecimal float is parsed in the + // format that strtod() accepts, except that a "0x" prefix is NOT matched. + // (In particular, in `hex` mode, the input "0xff" results in the largest + // matching pattern "0".) + absl::from_chars_result from_chars(const char* first, const char* last, + double& value, // NOLINT + chars_format fmt = chars_format::general); + + absl::from_chars_result from_chars(const char* first, const char* last, + float& value, // NOLINT + chars_format fmt = chars_format::general); + + // std::chars_format is specified as a bitmask type, which means the following + // operations must be provided: + inline constexpr chars_format operator&(chars_format lhs, chars_format rhs) + { + return static_cast(static_cast(lhs) & static_cast(rhs)); + } + inline constexpr chars_format operator|(chars_format lhs, chars_format rhs) + { + return static_cast(static_cast(lhs) | static_cast(rhs)); + } + inline constexpr chars_format operator^(chars_format lhs, chars_format rhs) + { + return static_cast(static_cast(lhs) ^ static_cast(rhs)); + } + inline constexpr chars_format operator~(chars_format arg) + { + return static_cast(~static_cast(arg)); + } + inline chars_format& operator&=(chars_format& lhs, chars_format rhs) + { + lhs = lhs & rhs; + return lhs; + } + inline chars_format& operator|=(chars_format& lhs, chars_format rhs) + { + lhs = lhs | rhs; + return lhs; + } + inline chars_format& operator^=(chars_format& lhs, chars_format rhs) + { + lhs = lhs ^ rhs; + return lhs; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_CHARCONV_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/cord.h b/CAPI/cpp/grpc/include/absl/strings/cord.h new file mode 100644 index 00000000..069f25dc --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/cord.h @@ -0,0 +1,1920 @@ +// Copyright 2020 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: cord.h +// ----------------------------------------------------------------------------- +// +// This file defines the `absl::Cord` data structure and operations on that data +// structure. A Cord is a string-like sequence of characters optimized for +// specific use cases. Unlike a `std::string`, which stores an array of +// contiguous characters, Cord data is stored in a structure consisting of +// separate, reference-counted "chunks." +// +// Because a Cord consists of these chunks, data can be added to or removed from +// a Cord during its lifetime. Chunks may also be shared between Cords. Unlike a +// `std::string`, a Cord can therefore accommodate data that changes over its +// lifetime, though it's not quite "mutable"; it can change only in the +// attachment, detachment, or rearrangement of chunks of its constituent data. +// +// A Cord provides some benefit over `std::string` under the following (albeit +// narrow) circumstances: +// +// * Cord data is designed to grow and shrink over a Cord's lifetime. Cord +// provides efficient insertions and deletions at the start and end of the +// character sequences, avoiding copies in those cases. Static data should +// generally be stored as strings. 
+// * External memory consisting of string-like data can be directly added to +// a Cord without requiring copies or allocations. +// * Cord data may be shared and copied cheaply. Cord provides a copy-on-write +// implementation and cheap sub-Cord operations. Copying a Cord is an O(1) +// operation. +// +// As a consequence to the above, Cord data is generally large. Small data +// should generally use strings, as construction of a Cord requires some +// overhead. Small Cords (<= 15 bytes) are represented inline, but most small +// Cords are expected to grow over their lifetimes. +// +// Note that because a Cord is made up of separate chunked data, random access +// to character data within a Cord is slower than within a `std::string`. +// +// Thread Safety +// +// Cord has the same thread-safety properties as many other types like +// std::string, std::vector<>, int, etc -- it is thread-compatible. In +// particular, if threads do not call non-const methods, then it is safe to call +// const methods without synchronization. Copying a Cord produces a new instance +// that can be used concurrently with the original in arbitrary ways. 
+ +#ifndef ABSL_STRINGS_CORD_H_ +#define ABSL_STRINGS_CORD_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/base/internal/per_thread_tls.h" +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/container/inlined_vector.h" +#include "absl/crc/internal/crc_cord_state.h" +#include "absl/functional/function_ref.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/cord_analysis.h" +#include "absl/strings/cord_buffer.h" +#include "absl/strings/internal/cord_data_edge.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_btree_reader.h" +#include "absl/strings/internal/cord_rep_crc.h" +#include "absl/strings/internal/cord_rep_ring.h" +#include "absl/strings/internal/cordz_functions.h" +#include "absl/strings/internal/cordz_info.h" +#include "absl/strings/internal/cordz_statistics.h" +#include "absl/strings/internal/cordz_update_scope.h" +#include "absl/strings/internal/cordz_update_tracker.h" +#include "absl/strings/internal/resize_uninitialized.h" +#include "absl/strings/internal/string_constant.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + class Cord; + class CordTestPeer; + template + Cord MakeCordFromExternal(absl::string_view, Releaser&&); + void CopyCordToString(const Cord& src, std::string* dst); + + // Cord memory accounting modes + enum class CordMemoryAccounting + { + // Counts the *approximate* number of bytes held in full or in part by this + // Cord (which may not remain the same between invocations). Cords that share + // memory could each be "charged" independently for the same shared memory. + // See also comment on `kTotalMorePrecise` on internally shared memory. 
+ kTotal, + + // Counts the *approximate* number of bytes held in full or in part by this + // Cord for the distinct memory held by this cord. This option is similar + // to `kTotal`, except that if the cord has multiple references to the same + // memory, that memory is only counted once. + // + // For example: + // absl::Cord cord; + // cord.append(some_other_cord); + // cord.append(some_other_cord); + // // Counts `some_other_cord` twice: + // cord.EstimatedMemoryUsage(kTotal); + // // Counts `some_other_cord` once: + // cord.EstimatedMemoryUsage(kTotalMorePrecise); + // + // The `kTotalMorePrecise` number is more expensive to compute as it requires + // deduplicating all memory references. Applications should prefer to use + // `kFairShare` or `kTotal` unless they really need a more precise estimate + // on "how much memory is potentially held / kept alive by this cord?" + kTotalMorePrecise, + + // Counts the *approximate* number of bytes held in full or in part by this + // Cord weighted by the sharing ratio of that data. For example, if some data + // edge is shared by 4 different Cords, then each cord is attributed 1/4th of + // the total memory usage as a 'fair share' of the total memory usage. + kFairShare, + }; + + // Cord + // + // A Cord is a sequence of characters, designed to be more efficient than a + // `std::string` in certain circumstances: namely, large string data that needs + // to change over its lifetime or shared, especially when such data is shared + // across API boundaries. + // + // A Cord stores its character data in a structure that allows efficient prepend + // and append operations. This makes a Cord useful for large string data sent + // over in a wire format that may need to be prepended or appended at some point + // during the data exchange (e.g. HTTP, protocol buffers). For example, a + // Cord is useful for storing an HTTP request, and prepending an HTTP header to + // such a request. 
+ // + // Cords should not be used for storing general string data, however. They + // require overhead to construct and are slower than strings for random access. + // + // The Cord API provides the following common API operations: + // + // * Create or assign Cords out of existing string data, memory, or other Cords + // * Append and prepend data to an existing Cord + // * Create new Sub-Cords from existing Cord data + // * Swap Cord data and compare Cord equality + // * Write out Cord data by constructing a `std::string` + // + // Additionally, the API provides iterator utilities to iterate through Cord + // data via chunks or character bytes. + // + class Cord + { + private: + template + using EnableIfString = + absl::enable_if_t::value, int>; + + public: + // Cord::Cord() Constructors. + + // Creates an empty Cord. + constexpr Cord() noexcept; + + // Creates a Cord from an existing Cord. Cord is copyable and efficiently + // movable. The moved-from state is valid but unspecified. + Cord(const Cord& src); + Cord(Cord&& src) noexcept; + Cord& operator=(const Cord& x); + Cord& operator=(Cord&& x) noexcept; + + // Creates a Cord from a `src` string. This constructor is marked explicit to + // prevent implicit Cord constructions from arguments convertible to an + // `absl::string_view`. + explicit Cord(absl::string_view src); + Cord& operator=(absl::string_view src); + + // Creates a Cord from a `std::string&&` rvalue. These constructors are + // templated to avoid ambiguities for types that are convertible to both + // `absl::string_view` and `std::string`, such as `const char*`. + template = 0> + explicit Cord(T&& src); + template = 0> + Cord& operator=(T&& src); + + // Cord::~Cord() + // + // Destructs the Cord. + ~Cord() + { + if (contents_.is_tree()) + DestroyCordSlow(); + } + + // MakeCordFromExternal() + // + // Creates a Cord that takes ownership of external string memory. 
The + // contents of `data` are not copied to the Cord; instead, the external + // memory is added to the Cord and reference-counted. This data may not be + // changed for the life of the Cord, though it may be prepended or appended + // to. + // + // `MakeCordFromExternal()` takes a callable "releaser" that is invoked when + // the reference count for `data` reaches zero. As noted above, this data must + // remain live until the releaser is invoked. The callable releaser also must: + // + // * be move constructible + // * support `void operator()(absl::string_view) const` or `void operator()` + // + // Example: + // + // Cord MakeCord(BlockPool* pool) { + // Block* block = pool->NewBlock(); + // FillBlock(block); + // return absl::MakeCordFromExternal( + // block->ToStringView(), + // [pool, block](absl::string_view v) { + // pool->FreeBlock(block, v); + // }); + // } + // + // WARNING: Because a Cord can be reference-counted, it's likely a bug if your + // releaser doesn't do anything. For example, consider the following: + // + // void Foo(const char* buffer, int len) { + // auto c = absl::MakeCordFromExternal(absl::string_view(buffer, len), + // [](absl::string_view) {}); + // + // // BUG: If Bar() copies its cord for any reason, including keeping a + // // substring of it, the lifetime of buffer might be extended beyond + // // when Foo() returns. + // Bar(c); + // } + template + friend Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser); + + // Cord::Clear() + // + // Releases the Cord data. Any nodes that share data with other Cords, if + // applicable, will have their reference counts reduced by 1. + ABSL_ATTRIBUTE_REINITIALIZES void Clear(); + + // Cord::Append() + // + // Appends data to the Cord, which may come from another Cord or other string + // data. 
+ void Append(const Cord& src); + void Append(Cord&& src); + void Append(absl::string_view src); + template = 0> + void Append(T&& src); + + // Appends `buffer` to this cord, unless `buffer` has a zero length in which + // case this method has no effect on this cord instance. + // This method is guaranteed to consume `buffer`. + void Append(CordBuffer buffer); + + // Returns a CordBuffer, re-using potential existing capacity in this cord. + // + // Cord instances may have additional unused capacity in the last (or first) + // nodes of the underlying tree to facilitate amortized growth. This method + // allows applications to explicitly use this spare capacity if available, + // or create a new CordBuffer instance otherwise. + // If this cord has a final non-shared node with at least `min_capacity` + // available, then this method will return that buffer including its data + // contents. I.e.; the returned buffer will have a non-zero length, and + // a capacity of at least `buffer.length + min_capacity`. Otherwise, this + // method will return `CordBuffer::CreateWithDefaultLimit(capacity)`. + // + // Below an example of using GetAppendBuffer. Notice that in this example we + // use `GetAppendBuffer()` only on the first iteration. As we know nothing + // about any initial extra capacity in `cord`, we may be able to use the extra + // capacity. But as we add new buffers with fully utilized contents after that + // we avoid calling `GetAppendBuffer()` on subsequent iterations: while this + // works fine, it results in an unnecessary inspection of cord contents: + // + // void AppendRandomDataToCord(absl::Cord &cord, size_t n) { + // bool first = true; + // while (n > 0) { + // CordBuffer buffer = first ? 
cord.GetAppendBuffer(n) + // : CordBuffer::CreateWithDefaultLimit(n); + // absl::Span data = buffer.available_up_to(n); + // FillRandomValues(data.data(), data.size()); + // buffer.IncreaseLengthBy(data.size()); + // cord.Append(std::move(buffer)); + // n -= data.size(); + // first = false; + // } + // } + CordBuffer GetAppendBuffer(size_t capacity, size_t min_capacity = 16); + + // Returns a CordBuffer, re-using potential existing capacity in this cord. + // + // This function is identical to `GetAppendBuffer`, except that in the case + // where a new `CordBuffer` is allocated, it is allocated using the provided + // custom limit instead of the default limit. `GetAppendBuffer` will default + // to `CordBuffer::CreateWithDefaultLimit(capacity)` whereas this method + // will default to `CordBuffer::CreateWithCustomLimit(block_size, capacity)`. + // This method is equivalent to `GetAppendBuffer` if `block_size` is zero. + // See the documentation for `CreateWithCustomLimit` for more details on the + // restrictions and legal values for `block_size`. + CordBuffer GetCustomAppendBuffer(size_t block_size, size_t capacity, size_t min_capacity = 16); + + // Cord::Prepend() + // + // Prepends data to the Cord, which may come from another Cord or other string + // data. + void Prepend(const Cord& src); + void Prepend(absl::string_view src); + template = 0> + void Prepend(T&& src); + + // Prepends `buffer` to this cord, unless `buffer` has a zero length in which + // case this method has no effect on this cord instance. + // This method is guaranteed to consume `buffer`. + void Prepend(CordBuffer buffer); + + // Cord::RemovePrefix() + // + // Removes the first `n` bytes of a Cord. + void RemovePrefix(size_t n); + void RemoveSuffix(size_t n); + + // Cord::Subcord() + // + // Returns a new Cord representing the subrange [pos, pos + new_size) of + // *this. If pos >= size(), the result is empty(). If + // (pos + new_size) >= size(), the result is the subrange [pos, size()). 
+ Cord Subcord(size_t pos, size_t new_size) const; + + // Cord::swap() + // + // Swaps the contents of the Cord with `other`. + void swap(Cord& other) noexcept; + + // swap() + // + // Swaps the contents of two Cords. + friend void swap(Cord& x, Cord& y) noexcept + { + x.swap(y); + } + + // Cord::size() + // + // Returns the size of the Cord. + size_t size() const; + + // Cord::empty() + // + // Determines whether the given Cord is empty, returning `true` is so. + bool empty() const; + + // Cord::EstimatedMemoryUsage() + // + // Returns the *approximate* number of bytes held by this cord. + // See CordMemoryAccounting for more information on the accounting method. + size_t EstimatedMemoryUsage(CordMemoryAccounting accounting_method = CordMemoryAccounting::kTotal) const; + + // Cord::Compare() + // + // Compares 'this' Cord with rhs. This function and its relatives treat Cords + // as sequences of unsigned bytes. The comparison is a straightforward + // lexicographic comparison. `Cord::Compare()` returns values as follows: + // + // -1 'this' Cord is smaller + // 0 two Cords are equal + // 1 'this' Cord is larger + int Compare(absl::string_view rhs) const; + int Compare(const Cord& rhs) const; + + // Cord::StartsWith() + // + // Determines whether the Cord starts with the passed string data `rhs`. + bool StartsWith(const Cord& rhs) const; + bool StartsWith(absl::string_view rhs) const; + + // Cord::EndsWith() + // + // Determines whether the Cord ends with the passed string data `rhs`. + bool EndsWith(absl::string_view rhs) const; + bool EndsWith(const Cord& rhs) const; + + // Cord::operator std::string() + // + // Converts a Cord into a `std::string()`. This operator is marked explicit to + // prevent unintended Cord usage in functions that take a string. + explicit operator std::string() const; + + // CopyCordToString() + // + // Copies the contents of a `src` Cord into a `*dst` string. 
+ // + // This function optimizes the case of reusing the destination string since it + // can reuse previously allocated capacity. However, this function does not + // guarantee that pointers previously returned by `dst->data()` remain valid + // even if `*dst` had enough capacity to hold `src`. If `*dst` is a new + // object, prefer to simply use the conversion operator to `std::string`. + friend void CopyCordToString(const Cord& src, std::string* dst); + + class CharIterator; + + //---------------------------------------------------------------------------- + // Cord::ChunkIterator + //---------------------------------------------------------------------------- + // + // A `Cord::ChunkIterator` allows iteration over the constituent chunks of its + // Cord. Such iteration allows you to perform non-const operations on the data + // of a Cord without modifying it. + // + // Generally, you do not instantiate a `Cord::ChunkIterator` directly; + // instead, you create one implicitly through use of the `Cord::Chunks()` + // member function. + // + // The `Cord::ChunkIterator` has the following properties: + // + // * The iterator is invalidated after any non-const operation on the + // Cord object over which it iterates. + // * The `string_view` returned by dereferencing a valid, non-`end()` + // iterator is guaranteed to be non-empty. + // * Two `ChunkIterator` objects can be compared equal if and only if they + // remain valid and iterate over the same Cord. + // * The iterator in this case is a proxy iterator; the `string_view` + // returned by the iterator does not live inside the Cord, and its + // lifetime is limited to the lifetime of the iterator itself. To help + // prevent lifetime issues, `ChunkIterator::reference` is not a true + // reference type and is equivalent to `value_type`. + // * The iterator keeps state that can grow for Cords that contain many + // nodes and are imbalanced due to sharing. 
Prefer to pass this type by + // const reference instead of by value. + class ChunkIterator + { + public: + using iterator_category = std::input_iterator_tag; + using value_type = absl::string_view; + using difference_type = ptrdiff_t; + using pointer = const value_type*; + using reference = value_type; + + ChunkIterator() = default; + + ChunkIterator& operator++(); + ChunkIterator operator++(int); + bool operator==(const ChunkIterator& other) const; + bool operator!=(const ChunkIterator& other) const; + reference operator*() const; + pointer operator->() const; + + friend class Cord; + friend class CharIterator; + + private: + using CordRep = absl::cord_internal::CordRep; + using CordRepBtree = absl::cord_internal::CordRepBtree; + using CordRepBtreeReader = absl::cord_internal::CordRepBtreeReader; + + // Constructs a `begin()` iterator from `tree`. `tree` must not be null. + explicit ChunkIterator(cord_internal::CordRep* tree); + + // Constructs a `begin()` iterator from `cord`. + explicit ChunkIterator(const Cord* cord); + + // Initializes this instance from a tree. Invoked by constructors. + void InitTree(cord_internal::CordRep* tree); + + // Removes `n` bytes from `current_chunk_`. Expects `n` to be smaller than + // `current_chunk_.size()`. + void RemoveChunkPrefix(size_t n); + Cord AdvanceAndReadBytes(size_t n); + void AdvanceBytes(size_t n); + + // Btree specific operator++ + ChunkIterator& AdvanceBtree(); + void AdvanceBytesBtree(size_t n); + + // A view into bytes of the current `CordRep`. It may only be a view to a + // suffix of bytes if this is being used by `CharIterator`. + absl::string_view current_chunk_; + // The current leaf, or `nullptr` if the iterator points to short data. + // If the current chunk is a substring node, current_leaf_ points to the + // underlying flat or external node. + absl::cord_internal::CordRep* current_leaf_ = nullptr; + // The number of bytes left in the `Cord` over which we are iterating. 
+ size_t bytes_remaining_ = 0; + + // Cord reader for cord btrees. Empty if not traversing a btree. + CordRepBtreeReader btree_reader_; + }; + + // Cord::chunk_begin() + // + // Returns an iterator to the first chunk of the `Cord`. + // + // Generally, prefer using `Cord::Chunks()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `ChunkIterator` where range-based for-loops are not useful. + // + // Example: + // + // absl::Cord::ChunkIterator FindAsChunk(const absl::Cord& c, + // absl::string_view s) { + // return std::find(c.chunk_begin(), c.chunk_end(), s); + // } + ChunkIterator chunk_begin() const; + + // Cord::chunk_end() + // + // Returns an iterator one increment past the last chunk of the `Cord`. + // + // Generally, prefer using `Cord::Chunks()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `ChunkIterator` where range-based for-loops may not be available. + ChunkIterator chunk_end() const; + + //---------------------------------------------------------------------------- + // Cord::ChunkRange + //---------------------------------------------------------------------------- + // + // `ChunkRange` is a helper class for iterating over the chunks of the `Cord`, + // producing an iterator which can be used within a range-based for loop. + // Construction of a `ChunkRange` will return an iterator pointing to the + // first chunk of the Cord. Generally, do not construct a `ChunkRange` + // directly; instead, prefer to use the `Cord::Chunks()` method. + // + // Implementation note: `ChunkRange` is simply a convenience wrapper over + // `Cord::chunk_begin()` and `Cord::chunk_end()`. 
+ class ChunkRange + { + public: + // Fulfill minimum c++ container requirements [container.requirements] + // These (partial) container type definitions allow ChunkRange to be used + // in various utilities expecting a subset of [container.requirements]. + // For example, the below enables using `::testing::ElementsAre(...)` + using value_type = absl::string_view; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = ChunkIterator; + using const_iterator = ChunkIterator; + + explicit ChunkRange(const Cord* cord) : + cord_(cord) + { + } + + ChunkIterator begin() const; + ChunkIterator end() const; + + private: + const Cord* cord_; + }; + + // Cord::Chunks() + // + // Returns a `Cord::ChunkRange` for iterating over the chunks of a `Cord` with + // a range-based for-loop. For most iteration tasks on a Cord, use + // `Cord::Chunks()` to retrieve this iterator. + // + // Example: + // + // void ProcessChunks(const Cord& cord) { + // for (absl::string_view chunk : cord.Chunks()) { ... } + // } + // + // Note that the ordinary caveats of temporary lifetime extension apply: + // + // void Process() { + // for (absl::string_view chunk : CordFactory().Chunks()) { + // // The temporary Cord returned by CordFactory has been destroyed! + // } + // } + ChunkRange Chunks() const; + + //---------------------------------------------------------------------------- + // Cord::CharIterator + //---------------------------------------------------------------------------- + // + // A `Cord::CharIterator` allows iteration over the constituent characters of + // a `Cord`. + // + // Generally, you do not instantiate a `Cord::CharIterator` directly; instead, + // you create one implicitly through use of the `Cord::Chars()` member + // function. + // + // A `Cord::CharIterator` has the following properties: + // + // * The iterator is invalidated after any non-const operation on the + // Cord object over which it iterates. 
+ // * Two `CharIterator` objects can be compared equal if and only if they + // remain valid and iterate over the same Cord. + // * The iterator keeps state that can grow for Cords that contain many + // nodes and are imbalanced due to sharing. Prefer to pass this type by + // const reference instead of by value. + // * This type cannot act as a forward iterator because a `Cord` can reuse + // sections of memory. This fact violates the requirement for forward + // iterators to compare equal if dereferencing them returns the same + // object. + class CharIterator + { + public: + using iterator_category = std::input_iterator_tag; + using value_type = char; + using difference_type = ptrdiff_t; + using pointer = const char*; + using reference = const char&; + + CharIterator() = default; + + CharIterator& operator++(); + CharIterator operator++(int); + bool operator==(const CharIterator& other) const; + bool operator!=(const CharIterator& other) const; + reference operator*() const; + pointer operator->() const; + + friend Cord; + + private: + explicit CharIterator(const Cord* cord) : + chunk_iterator_(cord) + { + } + + ChunkIterator chunk_iterator_; + }; + + // Cord::AdvanceAndRead() + // + // Advances the `Cord::CharIterator` by `n_bytes` and returns the bytes + // advanced as a separate `Cord`. `n_bytes` must be less than or equal to the + // number of bytes within the Cord; otherwise, behavior is undefined. It is + // valid to pass `char_end()` and `0`. + static Cord AdvanceAndRead(CharIterator* it, size_t n_bytes); + + // Cord::Advance() + // + // Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than + // or equal to the number of bytes remaining within the Cord; otherwise, + // behavior is undefined. It is valid to pass `char_end()` and `0`. + static void Advance(CharIterator* it, size_t n_bytes); + + // Cord::ChunkRemaining() + // + // Returns the longest contiguous view starting at the iterator's position. 
+ // + // `it` must be dereferenceable. + static absl::string_view ChunkRemaining(const CharIterator& it); + + // Cord::char_begin() + // + // Returns an iterator to the first character of the `Cord`. + // + // Generally, prefer using `Cord::Chars()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `CharIterator` where range-based for-loops may not be available. + CharIterator char_begin() const; + + // Cord::char_end() + // + // Returns an iterator to one past the last character of the `Cord`. + // + // Generally, prefer using `Cord::Chars()` within a range-based for loop for + // iterating over the chunks of a Cord. This method may be useful for getting + // a `CharIterator` where range-based for-loops are not useful. + CharIterator char_end() const; + + // Cord::CharRange + // + // `CharRange` is a helper class for iterating over the characters of a + // producing an iterator which can be used within a range-based for loop. + // Construction of a `CharRange` will return an iterator pointing to the first + // character of the Cord. Generally, do not construct a `CharRange` directly; + // instead, prefer to use the `Cord::Chars()` method shown below. + // + // Implementation note: `CharRange` is simply a convenience wrapper over + // `Cord::char_begin()` and `Cord::char_end()`. + class CharRange + { + public: + // Fulfill minimum c++ container requirements [container.requirements] + // These (partial) container type definitions allow CharRange to be used + // in various utilities expecting a subset of [container.requirements]. 
+ // For example, the below enables using `::testing::ElementsAre(...)` + using value_type = char; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = CharIterator; + using const_iterator = CharIterator; + + explicit CharRange(const Cord* cord) : + cord_(cord) + { + } + + CharIterator begin() const; + CharIterator end() const; + + private: + const Cord* cord_; + }; + + // Cord::Chars() + // + // Returns a `Cord::CharRange` for iterating over the characters of a `Cord` + // with a range-based for-loop. For most character-based iteration tasks on a + // Cord, use `Cord::Chars()` to retrieve this iterator. + // + // Example: + // + // void ProcessCord(const Cord& cord) { + // for (char c : cord.Chars()) { ... } + // } + // + // Note that the ordinary caveats of temporary lifetime extension apply: + // + // void Process() { + // for (char c : CordFactory().Chars()) { + // // The temporary Cord returned by CordFactory has been destroyed! + // } + // } + CharRange Chars() const; + + // Cord::operator[] + // + // Gets the "i"th character of the Cord and returns it, provided that + // 0 <= i < Cord.size(). + // + // NOTE: This routine is reasonably efficient. It is roughly + // logarithmic based on the number of chunks that make up the cord. Still, + // if you need to iterate over the contents of a cord, you should + // use a CharIterator/ChunkIterator rather than call operator[] or Get() + // repeatedly in a loop. + char operator[](size_t i) const; + + // Cord::TryFlat() + // + // If this cord's representation is a single flat array, returns a + // string_view referencing that array. Otherwise returns nullopt. + absl::optional TryFlat() const; + + // Cord::Flatten() + // + // Flattens the cord into a single array and returns a view of the data. + // + // If the cord was already flat, the contents are not modified. + absl::string_view Flatten(); + + // Supports absl::Cord as a sink object for absl::Format(). 
+ friend void AbslFormatFlush(absl::Cord* cord, absl::string_view part) + { + cord->Append(part); + } + + // Cord::SetExpectedChecksum() + // + // Stores a checksum value with this non-empty cord instance, for later + // retrieval. + // + // The expected checksum is a number stored out-of-band, alongside the data. + // It is preserved across copies and assignments, but any mutations to a cord + // will cause it to lose its expected checksum. + // + // The expected checksum is not part of a Cord's value, and does not affect + // operations such as equality or hashing. + // + // This field is intended to store a CRC32C checksum for later validation, to + // help support end-to-end checksum workflows. However, the Cord API itself + // does no CRC validation, and assigns no meaning to this number. + // + // This call has no effect if this cord is empty. + void SetExpectedChecksum(uint32_t crc); + + // Returns this cord's expected checksum, if it has one. Otherwise, returns + // nullopt. + absl::optional ExpectedChecksum() const; + + template + friend H AbslHashValue(H hash_state, const absl::Cord& c) + { + absl::optional maybe_flat = c.TryFlat(); + if (maybe_flat.has_value()) + { + return H::combine(std::move(hash_state), *maybe_flat); + } + return c.HashFragmented(std::move(hash_state)); + } + + // Create a Cord with the contents of StringConstant::value. + // No allocations will be done and no data will be copied. + // This is an INTERNAL API and subject to change or removal. This API can only + // be used by spelling absl::strings_internal::MakeStringConstant, which is + // also an internal API. 
+ template + // NOLINTNEXTLINE(google-explicit-constructor) + constexpr Cord(strings_internal::StringConstant); + + private: + using CordRep = absl::cord_internal::CordRep; + using CordRepFlat = absl::cord_internal::CordRepFlat; + using CordzInfo = cord_internal::CordzInfo; + using CordzUpdateScope = cord_internal::CordzUpdateScope; + using CordzUpdateTracker = cord_internal::CordzUpdateTracker; + using InlineData = cord_internal::InlineData; + using MethodIdentifier = CordzUpdateTracker::MethodIdentifier; + + // Creates a cord instance with `method` representing the originating + // public API call causing the cord to be created. + explicit Cord(absl::string_view src, MethodIdentifier method); + + friend class CordTestPeer; + friend bool operator==(const Cord& lhs, const Cord& rhs); + friend bool operator==(const Cord& lhs, absl::string_view rhs); + + friend const CordzInfo* GetCordzInfoForTesting(const Cord& cord); + + // Calls the provided function once for each cord chunk, in order. Unlike + // Chunks(), this API will not allocate memory. + void ForEachChunk(absl::FunctionRef) const; + + // Allocates new contiguous storage for the contents of the cord. This is + // called by Flatten() when the cord was not already flat. + absl::string_view FlattenSlowPath(); + + // Actual cord contents are hidden inside the following simple + // class so that we can isolate the bulk of cord.cc from changes + // to the representation. + // + // InlineRep holds either a tree pointer, or an array of kMaxInline bytes. 
+ class InlineRep + { + public: + static constexpr unsigned char kMaxInline = cord_internal::kMaxInline; + static_assert(kMaxInline >= sizeof(absl::cord_internal::CordRep*), ""); + + constexpr InlineRep() : + data_() + { + } + explicit InlineRep(InlineData::DefaultInitType init) : + data_(init) + { + } + InlineRep(const InlineRep& src); + InlineRep(InlineRep&& src); + InlineRep& operator=(const InlineRep& src); + InlineRep& operator=(InlineRep&& src) noexcept; + + explicit constexpr InlineRep(absl::string_view sv, CordRep* rep); + + void Swap(InlineRep* rhs); + bool empty() const; + size_t size() const; + const char* data() const; // Returns nullptr if holding pointer + void set_data(const char* data, size_t n); // Discards pointer, if any + char* set_data(size_t n); // Write data to the result + // Returns nullptr if holding bytes + absl::cord_internal::CordRep* tree() const; + absl::cord_internal::CordRep* as_tree() const; + const char* as_chars() const; + // Returns non-null iff was holding a pointer + absl::cord_internal::CordRep* clear(); + // Converts to pointer if necessary. + void reduce_size(size_t n); // REQUIRES: holding data + void remove_prefix(size_t n); // REQUIRES: holding data + void AppendArray(absl::string_view src, MethodIdentifier method); + absl::string_view FindFlatStartPiece() const; + + // Creates a CordRepFlat instance from the current inlined data with `extra' + // bytes of desired additional capacity. + CordRepFlat* MakeFlatWithExtraCapacity(size_t extra); + + // Sets the tree value for this instance. `rep` must not be null. + // Requires the current instance to hold a tree, and a lock to be held on + // any CordzInfo referenced by this instance. The latter is enforced through + // the CordzUpdateScope argument. If the current instance is sampled, then + // the CordzInfo instance is updated to reference the new `rep` value. 
+ void SetTree(CordRep* rep, const CordzUpdateScope& scope); + + // Identical to SetTree(), except that `rep` is allowed to be null, in + // which case the current instance is reset to an empty value. + void SetTreeOrEmpty(CordRep* rep, const CordzUpdateScope& scope); + + // Sets the tree value for this instance, and randomly samples this cord. + // This function disregards existing contents in `data_`, and should be + // called when a Cord is 'promoted' from an 'uninitialized' or 'inlined' + // value to a non-inlined (tree / ring) value. + void EmplaceTree(CordRep* rep, MethodIdentifier method); + + // Identical to EmplaceTree, except that it copies the parent stack from + // the provided `parent` data if the parent is sampled. + void EmplaceTree(CordRep* rep, const InlineData& parent, MethodIdentifier method); + + // Commits the change of a newly created, or updated `rep` root value into + // this cord. `old_rep` indicates the old (inlined or tree) value of the + // cord, and determines if the commit invokes SetTree() or EmplaceTree(). + void CommitTree(const CordRep* old_rep, CordRep* rep, const CordzUpdateScope& scope, MethodIdentifier method); + + void AppendTreeToInlined(CordRep* tree, MethodIdentifier method); + void AppendTreeToTree(CordRep* tree, MethodIdentifier method); + void AppendTree(CordRep* tree, MethodIdentifier method); + void PrependTreeToInlined(CordRep* tree, MethodIdentifier method); + void PrependTreeToTree(CordRep* tree, MethodIdentifier method); + void PrependTree(CordRep* tree, MethodIdentifier method); + + bool IsSame(const InlineRep& other) const + { + return data_ == other.data_; + } + + void CopyTo(std::string* dst) const + { + // memcpy is much faster when operating on a known size. On most supported + // platforms, the small string optimization is large enough that resizing + // to 15 bytes does not cause a memory allocation. 
+ absl::strings_internal::STLStringResizeUninitialized(dst, kMaxInline); + data_.copy_max_inline_to(&(*dst)[0]); + // erase is faster than resize because the logic for memory allocation is + // not needed. + dst->erase(inline_size()); + } + + // Copies the inline contents into `dst`. Assumes the cord is not empty. + void CopyToArray(char* dst) const; + + bool is_tree() const + { + return data_.is_tree(); + } + + // Returns true if the Cord is being profiled by cordz. + bool is_profiled() const + { + return data_.is_tree() && data_.is_profiled(); + } + + // Returns the available inlined capacity, or 0 if is_tree() == true. + size_t remaining_inline_capacity() const + { + return data_.is_tree() ? 0 : kMaxInline - data_.inline_size(); + } + + // Returns the profiled CordzInfo, or nullptr if not sampled. + absl::cord_internal::CordzInfo* cordz_info() const + { + return data_.cordz_info(); + } + + // Sets the profiled CordzInfo. `cordz_info` must not be null. + void set_cordz_info(cord_internal::CordzInfo* cordz_info) + { + assert(cordz_info != nullptr); + data_.set_cordz_info(cordz_info); + } + + // Resets the current cordz_info to null / empty. + void clear_cordz_info() + { + data_.clear_cordz_info(); + } + + private: + friend class Cord; + + void AssignSlow(const InlineRep& src); + // Unrefs the tree and stops profiling. + void UnrefTree(); + + void ResetToEmpty() + { + data_ = {}; + } + + void set_inline_size(size_t size) + { + data_.set_inline_size(size); + } + size_t inline_size() const + { + return data_.inline_size(); + } + + // Empty cords that carry a checksum have a CordRepCrc node with a null + // child node. The code can avoid lots of special cases where it would + // otherwise transition from tree to inline storage if we just remove the + // CordRepCrc node before mutations. Must never be called inside a + // CordzUpdateScope since it untracks the cordz info. 
+ void MaybeRemoveEmptyCrcNode(); + + cord_internal::InlineData data_; + }; + InlineRep contents_; + + // Helper for GetFlat() and TryFlat(). + static bool GetFlatAux(absl::cord_internal::CordRep* rep, absl::string_view* fragment); + + // Helper for ForEachChunk(). + static void ForEachChunkAux( + absl::cord_internal::CordRep* rep, + absl::FunctionRef callback + ); + + // The destructor for non-empty Cords. + void DestroyCordSlow(); + + // Out-of-line implementation of slower parts of logic. + void CopyToArraySlowPath(char* dst) const; + int CompareSlowPath(absl::string_view rhs, size_t compared_size, size_t size_to_compare) const; + int CompareSlowPath(const Cord& rhs, size_t compared_size, size_t size_to_compare) const; + bool EqualsImpl(absl::string_view rhs, size_t size_to_compare) const; + bool EqualsImpl(const Cord& rhs, size_t size_to_compare) const; + int CompareImpl(const Cord& rhs) const; + + template + friend ResultType GenericCompare(const Cord& lhs, const RHS& rhs, size_t size_to_compare); + static absl::string_view GetFirstChunk(const Cord& c); + static absl::string_view GetFirstChunk(absl::string_view sv); + + // Returns a new reference to contents_.tree(), or steals an existing + // reference if called on an rvalue. + absl::cord_internal::CordRep* TakeRep() const&; + absl::cord_internal::CordRep* TakeRep() &&; + + // Helper for Append(). + template + void AppendImpl(C&& src); + + // Appends / Prepends `src` to this instance, using precise sizing. + // This method does explicitly not attempt to use any spare capacity + // in any pending last added private owned flat. + // Requires `src` to be <= kMaxFlatLength. + void AppendPrecise(absl::string_view src, MethodIdentifier method); + void PrependPrecise(absl::string_view src, MethodIdentifier method); + + CordBuffer GetAppendBufferSlowPath(size_t block_size, size_t capacity, size_t min_capacity); + + // Prepends the provided data to this instance. 
`method` contains the public + // API method for this action which is tracked for Cordz sampling purposes. + void PrependArray(absl::string_view src, MethodIdentifier method); + + // Assigns the value in 'src' to this instance, 'stealing' its contents. + // Requires src.length() > kMaxBytesToCopy. + Cord& AssignLargeString(std::string&& src); + + // Helper for AbslHashValue(). + template + H HashFragmented(H hash_state) const + { + typename H::AbslInternalPiecewiseCombiner combiner; + ForEachChunk([&combiner, &hash_state](absl::string_view chunk) + { hash_state = combiner.add_buffer(std::move(hash_state), chunk.data(), chunk.size()); }); + return H::combine(combiner.finalize(std::move(hash_state)), size()); + } + + friend class CrcCord; + void SetCrcCordState(crc_internal::CrcCordState state); + const crc_internal::CrcCordState* MaybeGetCrcCordState() const; + }; + + ABSL_NAMESPACE_END +} // namespace absl + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // allow a Cord to be logged + extern std::ostream& operator<<(std::ostream& out, const Cord& cord); + + // ------------------------------------------------------------------ + // Internal details follow. Clients should ignore. + + namespace cord_internal + { + + // Does non-template-specific `CordRepExternal` initialization. + // Requires `data` to be non-empty. + void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep); + + // Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer + // to it. Requires `data` to be non-empty. + template + // NOLINTNEXTLINE - suppress clang-tidy raw pointer return. 
+ CordRep* NewExternalRep(absl::string_view data, Releaser&& releaser) + { + assert(!data.empty()); + using ReleaserType = absl::decay_t; + CordRepExternal* rep = new CordRepExternalImpl( + std::forward(releaser), 0 + ); + InitializeCordRepExternal(data, rep); + return rep; + } + + // Overload for function reference types that dispatches using a function + // pointer because there are no `alignof()` or `sizeof()` a function reference. + // NOLINTNEXTLINE - suppress clang-tidy raw pointer return. + inline CordRep* NewExternalRep(absl::string_view data, void (&releaser)(absl::string_view)) + { + return NewExternalRep(data, &releaser); + } + + } // namespace cord_internal + + template + Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) + { + Cord cord; + if (ABSL_PREDICT_TRUE(!data.empty())) + { + cord.contents_.EmplaceTree(::absl::cord_internal::NewExternalRep(data, std::forward(releaser)), Cord::MethodIdentifier::kMakeCordFromExternal); + } + else + { + using ReleaserType = absl::decay_t; + cord_internal::InvokeReleaser( + cord_internal::Rank0{}, ReleaserType(std::forward(releaser)), data + ); + } + return cord; + } + + constexpr Cord::InlineRep::InlineRep(absl::string_view sv, CordRep* rep) : + data_(sv, rep) + { + } + + inline Cord::InlineRep::InlineRep(const Cord::InlineRep& src) : + data_(InlineData::kDefaultInit) + { + if (CordRep* tree = src.tree()) + { + EmplaceTree(CordRep::Ref(tree), src.data_, CordzUpdateTracker::kConstructorCord); + } + else + { + data_ = src.data_; + } + } + + inline Cord::InlineRep::InlineRep(Cord::InlineRep&& src) : + data_(src.data_) + { + src.ResetToEmpty(); + } + + inline Cord::InlineRep& Cord::InlineRep::operator=(const Cord::InlineRep& src) + { + if (this == &src) + { + return *this; + } + if (!is_tree() && !src.is_tree()) + { + data_ = src.data_; + return *this; + } + AssignSlow(src); + return *this; + } + + inline Cord::InlineRep& Cord::InlineRep::operator=( + Cord::InlineRep&& src + ) noexcept + { + if 
(is_tree()) + { + UnrefTree(); + } + data_ = src.data_; + src.ResetToEmpty(); + return *this; + } + + inline void Cord::InlineRep::Swap(Cord::InlineRep* rhs) + { + if (rhs == this) + { + return; + } + std::swap(data_, rhs->data_); + } + + inline const char* Cord::InlineRep::data() const + { + return is_tree() ? nullptr : data_.as_chars(); + } + + inline const char* Cord::InlineRep::as_chars() const + { + assert(!data_.is_tree()); + return data_.as_chars(); + } + + inline absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const + { + assert(data_.is_tree()); + return data_.as_tree(); + } + + inline absl::cord_internal::CordRep* Cord::InlineRep::tree() const + { + if (is_tree()) + { + return as_tree(); + } + else + { + return nullptr; + } + } + + inline bool Cord::InlineRep::empty() const + { + return data_.is_empty(); + } + + inline size_t Cord::InlineRep::size() const + { + return is_tree() ? as_tree()->length : inline_size(); + } + + inline cord_internal::CordRepFlat* Cord::InlineRep::MakeFlatWithExtraCapacity( + size_t extra + ) + { + static_assert(cord_internal::kMinFlatLength >= sizeof(data_), ""); + size_t len = data_.inline_size(); + auto* result = CordRepFlat::New(len + extra); + result->length = len; + data_.copy_max_inline_to(result->Data()); + return result; + } + + inline void Cord::InlineRep::EmplaceTree(CordRep* rep, MethodIdentifier method) + { + assert(rep); + data_.make_tree(rep); + CordzInfo::MaybeTrackCord(data_, method); + } + + inline void Cord::InlineRep::EmplaceTree(CordRep* rep, const InlineData& parent, MethodIdentifier method) + { + data_.make_tree(rep); + CordzInfo::MaybeTrackCord(data_, parent, method); + } + + inline void Cord::InlineRep::SetTree(CordRep* rep, const CordzUpdateScope& scope) + { + assert(rep); + assert(data_.is_tree()); + data_.set_tree(rep); + scope.SetCordRep(rep); + } + + inline void Cord::InlineRep::SetTreeOrEmpty(CordRep* rep, const CordzUpdateScope& scope) + { + assert(data_.is_tree()); + if (rep) + { + 
data_.set_tree(rep); + } + else + { + data_ = {}; + } + scope.SetCordRep(rep); + } + + inline void Cord::InlineRep::CommitTree(const CordRep* old_rep, CordRep* rep, const CordzUpdateScope& scope, MethodIdentifier method) + { + if (old_rep) + { + SetTree(rep, scope); + } + else + { + EmplaceTree(rep, method); + } + } + + inline absl::cord_internal::CordRep* Cord::InlineRep::clear() + { + if (is_tree()) + { + CordzInfo::MaybeUntrackCord(cordz_info()); + } + absl::cord_internal::CordRep* result = tree(); + ResetToEmpty(); + return result; + } + + inline void Cord::InlineRep::CopyToArray(char* dst) const + { + assert(!is_tree()); + size_t n = inline_size(); + assert(n != 0); + cord_internal::SmallMemmove(dst, data_.as_chars(), n); + } + + inline void Cord::InlineRep::MaybeRemoveEmptyCrcNode() + { + CordRep* rep = tree(); + if (rep == nullptr || ABSL_PREDICT_TRUE(rep->length > 0)) + { + return; + } + assert(rep->IsCrc()); + assert(rep->crc()->child == nullptr); + CordzInfo::MaybeUntrackCord(cordz_info()); + CordRep::Unref(rep); + ResetToEmpty(); + } + + constexpr inline Cord::Cord() noexcept + { + } + + inline Cord::Cord(absl::string_view src) : + Cord(src, CordzUpdateTracker::kConstructorString) + { + } + + template + constexpr Cord::Cord(strings_internal::StringConstant) : + contents_(strings_internal::StringConstant::value, strings_internal::StringConstant::value.size() <= cord_internal::kMaxInline ? 
nullptr : &cord_internal::ConstInitExternalStorage>::value) + { + } + + inline Cord& Cord::operator=(const Cord& x) + { + contents_ = x.contents_; + return *this; + } + + template> + Cord& Cord::operator=(T&& src) + { + if (src.size() <= cord_internal::kMaxBytesToCopy) + { + return operator=(absl::string_view(src)); + } + else + { + return AssignLargeString(std::forward(src)); + } + } + + inline Cord::Cord(const Cord& src) : + contents_(src.contents_) + { + } + + inline Cord::Cord(Cord&& src) noexcept : + contents_(std::move(src.contents_)) + { + } + + inline void Cord::swap(Cord& other) noexcept + { + contents_.Swap(&other.contents_); + } + + inline Cord& Cord::operator=(Cord&& x) noexcept + { + contents_ = std::move(x.contents_); + return *this; + } + + extern template Cord::Cord(std::string&& src); + + inline size_t Cord::size() const + { + // Length is 1st field in str.rep_ + return contents_.size(); + } + + inline bool Cord::empty() const + { + return size() == 0; + } + + inline size_t Cord::EstimatedMemoryUsage( + CordMemoryAccounting accounting_method + ) const + { + size_t result = sizeof(Cord); + if (const absl::cord_internal::CordRep* rep = contents_.tree()) + { + switch (accounting_method) + { + case CordMemoryAccounting::kFairShare: + result += cord_internal::GetEstimatedFairShareMemoryUsage(rep); + break; + case CordMemoryAccounting::kTotalMorePrecise: + result += cord_internal::GetMorePreciseMemoryUsage(rep); + break; + case CordMemoryAccounting::kTotal: + result += cord_internal::GetEstimatedMemoryUsage(rep); + break; + } + } + return result; + } + + inline absl::optional Cord::TryFlat() const + { + absl::cord_internal::CordRep* rep = contents_.tree(); + if (rep == nullptr) + { + return absl::string_view(contents_.data(), contents_.size()); + } + absl::string_view fragment; + if (GetFlatAux(rep, &fragment)) + { + return fragment; + } + return absl::nullopt; + } + + inline absl::string_view Cord::Flatten() + { + absl::cord_internal::CordRep* rep = 
contents_.tree(); + if (rep == nullptr) + { + return absl::string_view(contents_.data(), contents_.size()); + } + else + { + absl::string_view already_flat_contents; + if (GetFlatAux(rep, &already_flat_contents)) + { + return already_flat_contents; + } + } + return FlattenSlowPath(); + } + + inline void Cord::Append(absl::string_view src) + { + contents_.AppendArray(src, CordzUpdateTracker::kAppendString); + } + + inline void Cord::Prepend(absl::string_view src) + { + PrependArray(src, CordzUpdateTracker::kPrependString); + } + + inline void Cord::Append(CordBuffer buffer) + { + if (ABSL_PREDICT_FALSE(buffer.length() == 0)) + return; + absl::string_view short_value; + if (CordRep* rep = buffer.ConsumeValue(short_value)) + { + contents_.AppendTree(rep, CordzUpdateTracker::kAppendCordBuffer); + } + else + { + AppendPrecise(short_value, CordzUpdateTracker::kAppendCordBuffer); + } + } + + inline void Cord::Prepend(CordBuffer buffer) + { + if (ABSL_PREDICT_FALSE(buffer.length() == 0)) + return; + absl::string_view short_value; + if (CordRep* rep = buffer.ConsumeValue(short_value)) + { + contents_.PrependTree(rep, CordzUpdateTracker::kPrependCordBuffer); + } + else + { + PrependPrecise(short_value, CordzUpdateTracker::kPrependCordBuffer); + } + } + + inline CordBuffer Cord::GetAppendBuffer(size_t capacity, size_t min_capacity) + { + if (empty()) + return CordBuffer::CreateWithDefaultLimit(capacity); + return GetAppendBufferSlowPath(0, capacity, min_capacity); + } + + inline CordBuffer Cord::GetCustomAppendBuffer(size_t block_size, size_t capacity, size_t min_capacity) + { + if (empty()) + { + return block_size ? 
CordBuffer::CreateWithCustomLimit(block_size, capacity) : CordBuffer::CreateWithDefaultLimit(capacity); + } + return GetAppendBufferSlowPath(block_size, capacity, min_capacity); + } + + extern template void Cord::Append(std::string&& src); + extern template void Cord::Prepend(std::string&& src); + + inline int Cord::Compare(const Cord& rhs) const + { + if (!contents_.is_tree() && !rhs.contents_.is_tree()) + { + return contents_.data_.Compare(rhs.contents_.data_); + } + + return CompareImpl(rhs); + } + + // Does 'this' cord start/end with rhs + inline bool Cord::StartsWith(const Cord& rhs) const + { + if (contents_.IsSame(rhs.contents_)) + return true; + size_t rhs_size = rhs.size(); + if (size() < rhs_size) + return false; + return EqualsImpl(rhs, rhs_size); + } + + inline bool Cord::StartsWith(absl::string_view rhs) const + { + size_t rhs_size = rhs.size(); + if (size() < rhs_size) + return false; + return EqualsImpl(rhs, rhs_size); + } + + inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) + { + tree = cord_internal::SkipCrcNode(tree); + if (tree->tag == cord_internal::BTREE) + { + current_chunk_ = btree_reader_.Init(tree->btree()); + } + else + { + current_leaf_ = tree; + current_chunk_ = cord_internal::EdgeData(tree); + } + } + + inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree) + { + bytes_remaining_ = tree->length; + InitTree(tree); + } + + inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) + { + if (CordRep* tree = cord->contents_.tree()) + { + bytes_remaining_ = tree->length; + if (ABSL_PREDICT_TRUE(bytes_remaining_ != 0)) + { + InitTree(tree); + } + else + { + current_chunk_ = {}; + } + } + else + { + bytes_remaining_ = cord->contents_.inline_size(); + current_chunk_ = {cord->contents_.data(), bytes_remaining_}; + } + } + + inline Cord::ChunkIterator& Cord::ChunkIterator::AdvanceBtree() + { + current_chunk_ = btree_reader_.Next(); + return *this; + } + + inline void 
Cord::ChunkIterator::AdvanceBytesBtree(size_t n) + { + assert(n >= current_chunk_.size()); + bytes_remaining_ -= n; + if (bytes_remaining_) + { + if (n == current_chunk_.size()) + { + current_chunk_ = btree_reader_.Next(); + } + else + { + size_t offset = btree_reader_.length() - bytes_remaining_; + current_chunk_ = btree_reader_.Seek(offset); + } + } + else + { + current_chunk_ = {}; + } + } + + inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() + { + ABSL_HARDENING_ASSERT(bytes_remaining_ > 0 && "Attempted to iterate past `end()`"); + assert(bytes_remaining_ >= current_chunk_.size()); + bytes_remaining_ -= current_chunk_.size(); + if (bytes_remaining_ > 0) + { + if (btree_reader_) + { + return AdvanceBtree(); + } + else + { + assert(!current_chunk_.empty()); // Called on invalid iterator. + } + current_chunk_ = {}; + } + return *this; + } + + inline Cord::ChunkIterator Cord::ChunkIterator::operator++(int) + { + ChunkIterator tmp(*this); + operator++(); + return tmp; + } + + inline bool Cord::ChunkIterator::operator==(const ChunkIterator& other) const + { + return bytes_remaining_ == other.bytes_remaining_; + } + + inline bool Cord::ChunkIterator::operator!=(const ChunkIterator& other) const + { + return !(*this == other); + } + + inline Cord::ChunkIterator::reference Cord::ChunkIterator::operator*() const + { + ABSL_HARDENING_ASSERT(bytes_remaining_ != 0); + return current_chunk_; + } + + inline Cord::ChunkIterator::pointer Cord::ChunkIterator::operator->() const + { + ABSL_HARDENING_ASSERT(bytes_remaining_ != 0); + return ¤t_chunk_; + } + + inline void Cord::ChunkIterator::RemoveChunkPrefix(size_t n) + { + assert(n < current_chunk_.size()); + current_chunk_.remove_prefix(n); + bytes_remaining_ -= n; + } + + inline void Cord::ChunkIterator::AdvanceBytes(size_t n) + { + assert(bytes_remaining_ >= n); + if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) + { + RemoveChunkPrefix(n); + } + else if (n != 0) + { + if (btree_reader_) + { + AdvanceBytesBtree(n); 
+ } + else + { + bytes_remaining_ = 0; + } + } + } + + inline Cord::ChunkIterator Cord::chunk_begin() const + { + return ChunkIterator(this); + } + + inline Cord::ChunkIterator Cord::chunk_end() const + { + return ChunkIterator(); + } + + inline Cord::ChunkIterator Cord::ChunkRange::begin() const + { + return cord_->chunk_begin(); + } + + inline Cord::ChunkIterator Cord::ChunkRange::end() const + { + return cord_->chunk_end(); + } + + inline Cord::ChunkRange Cord::Chunks() const + { + return ChunkRange(this); + } + + inline Cord::CharIterator& Cord::CharIterator::operator++() + { + if (ABSL_PREDICT_TRUE(chunk_iterator_->size() > 1)) + { + chunk_iterator_.RemoveChunkPrefix(1); + } + else + { + ++chunk_iterator_; + } + return *this; + } + + inline Cord::CharIterator Cord::CharIterator::operator++(int) + { + CharIterator tmp(*this); + operator++(); + return tmp; + } + + inline bool Cord::CharIterator::operator==(const CharIterator& other) const + { + return chunk_iterator_ == other.chunk_iterator_; + } + + inline bool Cord::CharIterator::operator!=(const CharIterator& other) const + { + return !(*this == other); + } + + inline Cord::CharIterator::reference Cord::CharIterator::operator*() const + { + return *chunk_iterator_->data(); + } + + inline Cord::CharIterator::pointer Cord::CharIterator::operator->() const + { + return chunk_iterator_->data(); + } + + inline Cord Cord::AdvanceAndRead(CharIterator* it, size_t n_bytes) + { + assert(it != nullptr); + return it->chunk_iterator_.AdvanceAndReadBytes(n_bytes); + } + + inline void Cord::Advance(CharIterator* it, size_t n_bytes) + { + assert(it != nullptr); + it->chunk_iterator_.AdvanceBytes(n_bytes); + } + + inline absl::string_view Cord::ChunkRemaining(const CharIterator& it) + { + return *it.chunk_iterator_; + } + + inline Cord::CharIterator Cord::char_begin() const + { + return CharIterator(this); + } + + inline Cord::CharIterator Cord::char_end() const + { + return CharIterator(); + } + + inline Cord::CharIterator 
Cord::CharRange::begin() const + { + return cord_->char_begin(); + } + + inline Cord::CharIterator Cord::CharRange::end() const + { + return cord_->char_end(); + } + + inline Cord::CharRange Cord::Chars() const + { + return CharRange(this); + } + + inline void Cord::ForEachChunk( + absl::FunctionRef callback + ) const + { + absl::cord_internal::CordRep* rep = contents_.tree(); + if (rep == nullptr) + { + callback(absl::string_view(contents_.data(), contents_.size())); + } + else + { + ForEachChunkAux(rep, callback); + } + } + + // Nonmember Cord-to-Cord relational operators. + inline bool operator==(const Cord& lhs, const Cord& rhs) + { + if (lhs.contents_.IsSame(rhs.contents_)) + return true; + size_t rhs_size = rhs.size(); + if (lhs.size() != rhs_size) + return false; + return lhs.EqualsImpl(rhs, rhs_size); + } + + inline bool operator!=(const Cord& x, const Cord& y) + { + return !(x == y); + } + inline bool operator<(const Cord& x, const Cord& y) + { + return x.Compare(y) < 0; + } + inline bool operator>(const Cord& x, const Cord& y) + { + return x.Compare(y) > 0; + } + inline bool operator<=(const Cord& x, const Cord& y) + { + return x.Compare(y) <= 0; + } + inline bool operator>=(const Cord& x, const Cord& y) + { + return x.Compare(y) >= 0; + } + + // Nonmember Cord-to-absl::string_view relational operators. + // + // Due to implicit conversions, these also enable comparisons of Cord with + // with std::string, ::string, and const char*. 
+ inline bool operator==(const Cord& lhs, absl::string_view rhs) + { + size_t lhs_size = lhs.size(); + size_t rhs_size = rhs.size(); + if (lhs_size != rhs_size) + return false; + return lhs.EqualsImpl(rhs, rhs_size); + } + + inline bool operator==(absl::string_view x, const Cord& y) + { + return y == x; + } + inline bool operator!=(const Cord& x, absl::string_view y) + { + return !(x == y); + } + inline bool operator!=(absl::string_view x, const Cord& y) + { + return !(x == y); + } + inline bool operator<(const Cord& x, absl::string_view y) + { + return x.Compare(y) < 0; + } + inline bool operator<(absl::string_view x, const Cord& y) + { + return y.Compare(x) > 0; + } + inline bool operator>(const Cord& x, absl::string_view y) + { + return y < x; + } + inline bool operator>(absl::string_view x, const Cord& y) + { + return y < x; + } + inline bool operator<=(const Cord& x, absl::string_view y) + { + return !(y < x); + } + inline bool operator<=(absl::string_view x, const Cord& y) + { + return !(y < x); + } + inline bool operator>=(const Cord& x, absl::string_view y) + { + return !(x < y); + } + inline bool operator>=(absl::string_view x, const Cord& y) + { + return !(x < y); + } + + // Some internals exposed to test code. 
+ namespace strings_internal + { + class CordTestAccess + { + public: + static size_t FlatOverhead(); + static size_t MaxFlatLength(); + static size_t SizeofCordRepExternal(); + static size_t SizeofCordRepSubstring(); + static size_t FlatTagToLength(uint8_t tag); + static uint8_t LengthToTag(size_t s); + }; + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_CORD_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/cord_analysis.h b/CAPI/cpp/grpc/include/absl/strings/cord_analysis.h new file mode 100644 index 00000000..7cde7add --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/cord_analysis.h @@ -0,0 +1,63 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_CORD_ANALYSIS_H_ +#define ABSL_STRINGS_CORD_ANALYSIS_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/internal/cord_internal.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // Returns the *approximate* number of bytes held in full or in part by this + // Cord (which may not remain the same between invocations). Cords that share + // memory could each be "charged" independently for the same shared memory. + size_t GetEstimatedMemoryUsage(const CordRep* rep); + + // Returns the *approximate* number of bytes held in full or in part by this + // Cord for the distinct memory held by this cord. 
This is similar to + // `GetEstimatedMemoryUsage()`, except that if the cord has multiple references + // to the same memory, that memory is only counted once. + // + // For example: + // absl::Cord cord; + // cord.append(some_other_cord); + // cord.append(some_other_cord); + // // Calls GetEstimatedMemoryUsage() and counts `other_cord` twice: + // cord.EstimatedMemoryUsage(kTotal); + // // Calls GetMorePreciseMemoryUsage() and counts `other_cord` once: + // cord.EstimatedMemoryUsage(kTotalMorePrecise); + // + // This is more expensive than `GetEstimatedMemoryUsage()` as it requires + // deduplicating all memory references. + size_t GetMorePreciseMemoryUsage(const CordRep* rep); + + // Returns the *approximate* number of bytes held in full or in part by this + // CordRep weighted by the sharing ratio of that data. For example, if some data + // edge is shared by 4 different Cords, then each cord is attribute 1/4th of + // the total memory usage as a 'fair share' of the total memory usage. + size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep); + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_CORD_ANALYSIS_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/cord_buffer.h b/CAPI/cpp/grpc/include/absl/strings/cord_buffer.h new file mode 100644 index 00000000..22d8cb0a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/cord_buffer.h @@ -0,0 +1,641 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: cord_buffer.h +// ----------------------------------------------------------------------------- +// +// This file defines an `absl::CordBuffer` data structure to hold data for +// eventual inclusion within an existing `Cord` data structure. Cord buffers are +// useful for building large Cords that may require custom allocation of its +// associated memory. +// +#ifndef ABSL_STRINGS_CORD_BUFFER_H_ +#define ABSL_STRINGS_CORD_BUFFER_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/numeric/bits.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_flat.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class Cord; + class CordBufferTestPeer; + + // CordBuffer + // + // CordBuffer manages memory buffers for purposes such as zero-copy APIs as well + // as applications building cords with large data requiring granular control + // over the allocation and size of cord data. For example, a function creating + // a cord of random data could use a CordBuffer as follows: + // + // absl::Cord CreateRandomCord(size_t length) { + // absl::Cord cord; + // while (length > 0) { + // CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(length); + // absl::Span data = buffer.available_up_to(length); + // FillRandomValues(data.data(), data.size()); + // buffer.IncreaseLengthBy(data.size()); + // cord.Append(std::move(buffer)); + // length -= data.size(); + // } + // return cord; + // } + // + // CordBuffer instances are by default limited to a capacity of `kDefaultLimit` + // bytes. `kDefaultLimit` is currently just under 4KiB, but this default may + // change in the future and/or for specific architectures. 
The default limit is + // aimed to provide a good trade-off between performance and memory overhead. + // Smaller buffers typically incur more compute cost while larger buffers are + // more CPU efficient but create significant memory overhead because of such + // allocations being less granular. Using larger buffers may also increase the + // risk of memory fragmentation. + // + // Applications create a buffer using one of the `CreateWithDefaultLimit()` or + // `CreateWithCustomLimit()` methods. The returned instance will have a non-zero + // capacity and a zero length. Applications use the `data()` method to set the + // contents of the managed memory, and once done filling the buffer, use the + // `IncreaseLengthBy()` or 'SetLength()' method to specify the length of the + // initialized data before adding the buffer to a Cord. + // + // The `CreateWithCustomLimit()` method is intended for applications needing + // larger buffers than the default memory limit, allowing the allocation of up + // to a capacity of `kCustomLimit` bytes minus some minimum internal overhead. + // The usage of `CreateWithCustomLimit()` should be limited to only those use + // cases where the distribution of the input is relatively well known, and/or + // where the trade-off between the efficiency gains outweigh the risk of memory + // fragmentation. See the documentation for `CreateWithCustomLimit()` for more + // information on using larger custom limits. + // + // The capacity of a `CordBuffer` returned by one of the `Create` methods may + // be larger than the requested capacity due to rounding, alignment and + // granularity of the memory allocator. Applications should use the `capacity` + // method to obtain the effective capacity of the returned instance as + // demonstrated in the provided example above. + // + // CordBuffer is a move-only class. 
All references into the managed memory are + // invalidated when an instance is moved into either another CordBuffer instance + // or a Cord. Writing to a location obtained by a previous call to `data()` + // after an instance was moved will lead to undefined behavior. + // + // A `moved from` CordBuffer instance will have a valid, but empty state. + // CordBuffer is thread compatible. + class CordBuffer + { + public: + // kDefaultLimit + // + // Default capacity limits of allocated CordBuffers. + // See the class comments for more information on allocation limits. + static constexpr size_t kDefaultLimit = cord_internal::kMaxFlatLength; + + // kCustomLimit + // + // Maximum size for CreateWithCustomLimit() allocated buffers. + // Note that the effective capacity may be slightly less + // because of internal overhead of internal cord buffers. + static constexpr size_t kCustomLimit = 64U << 10; + + // Constructors, Destructors and Assignment Operators + + // Creates an empty CordBuffer. + CordBuffer() = default; + + // Destroys this CordBuffer instance and, if not empty, releases any memory + // managed by this instance, invalidating previously returned references. + ~CordBuffer(); + + // CordBuffer is move-only + CordBuffer(CordBuffer&& rhs) noexcept; + CordBuffer& operator=(CordBuffer&&) noexcept; + CordBuffer(const CordBuffer&) = delete; + CordBuffer& operator=(const CordBuffer&) = delete; + + // CordBuffer::MaximumPayload() + // + // Returns the guaranteed maximum payload for a CordBuffer returned by the + // `CreateWithDefaultLimit()` method. While small, each internal buffer inside + // a Cord incurs an overhead to manage the length, type and reference count + // for the buffer managed inside the cord tree. Applications can use this + // method to get approximate number of buffers required for a given byte + // size, etc. 
+ // + // For example: + // const size_t payload = absl::CordBuffer::MaximumPayload(); + // const size_t buffer_count = (total_size + payload - 1) / payload; + // buffers.reserve(buffer_count); + static constexpr size_t MaximumPayload(); + + // Overload to the above `MaximumPayload()` except that it returns the + // maximum payload for a CordBuffer returned by the `CreateWithCustomLimit()` + // method given the provided `block_size`. + static constexpr size_t MaximumPayload(size_t block_size); + + // CordBuffer::CreateWithDefaultLimit() + // + // Creates a CordBuffer instance of the desired `capacity`, capped at the + // default limit `kDefaultLimit`. The returned buffer has a guaranteed + // capacity of at least `min(kDefaultLimit, capacity)`. See the class comments + // for more information on buffer capacities and intended usage. + static CordBuffer CreateWithDefaultLimit(size_t capacity); + + // CordBuffer::CreateWithCustomLimit() + // + // Creates a CordBuffer instance of the desired `capacity` rounded to an + // appropriate power of 2 size less than, or equal to `block_size`. + // Requires `block_size` to be a power of 2. + // + // If `capacity` is less than or equal to `kDefaultLimit`, then this method + // behaves identical to `CreateWithDefaultLimit`, which means that the caller + // is guaranteed to get a buffer of at least the requested capacity. + // + // If `capacity` is greater than or equal to `block_size`, then this method + // returns a buffer with an `allocated size` of `block_size` bytes. Otherwise, + // this methods returns a buffer with a suitable smaller power of 2 block size + // to satisfy the request. The actual size depends on a number of factors, and + // is typically (but not necessarily) the highest or second highest power of 2 + // value less than or equal to `capacity`. + // + // The 'allocated size' includes a small amount of overhead required for + // internal state, which is currently 13 bytes on 64-bit platforms. 
For + // example: a buffer created with `block_size` and `capacity' set to 8KiB + // will have an allocated size of 8KiB, and an effective internal `capacity` + // of 8KiB - 13 = 8179 bytes. + // + // To demonstrate this in practice, let's assume we want to read data from + // somewhat larger files using approximately 64KiB buffers: + // + // absl::Cord ReadFromFile(int fd, size_t n) { + // absl::Cord cord; + // while (n > 0) { + // CordBuffer buffer = CordBuffer::CreateWithCustomLimit(64 << 10, n); + // absl::Span data = buffer.available_up_to(n); + // ReadFileDataOrDie(fd, data.data(), data.size()); + // buffer.IncreaseLengthBy(data.size()); + // cord.Append(std::move(buffer)); + // n -= data.size(); + // } + // return cord; + // } + // + // If we'd use this function to read a file of 659KiB, we may get the + // following pattern of allocated cord buffer sizes: + // + // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523) + // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523) + // ... + // CreateWithCustomLimit(64KiB, 19586) --> ~16KiB (16371) + // CreateWithCustomLimit(64KiB, 3215) --> 3215 (at least 3215) + // + // The reason the method returns a 16K buffer instead of a roughly 19K buffer + // is to reduce memory overhead and fragmentation risks. Using carefully + // chosen power of 2 values reduces the entropy of allocated memory sizes. + // + // Additionally, let's assume we'd use the above function on files that are + // generally smaller than 64K. If we'd use 'precise' sized buffers for such + // files, than we'd get a very wide distribution of allocated memory sizes + // rounded to 4K page sizes, and we'd end up with a lot of unused capacity. + // + // In general, application should only use custom sizes if the data they are + // consuming or storing is expected to be many times the chosen block size, + // and be based on objective data and performance metrics. 
For example, a + // compress function may work faster and consume less CPU when using larger + // buffers. Such an application should pick a size offering a reasonable + // trade-off between expected data size, compute savings with larger buffers, + // and the cost or fragmentation effect of larger buffers. + // Applications must pick a reasonable spot on that curve, and make sure their + // data meets their expectations in size distributions such as "mostly large". + static CordBuffer CreateWithCustomLimit(size_t block_size, size_t capacity); + + // CordBuffer::available() + // + // Returns the span delineating the available capacity in this buffer + // which is defined as `{ data() + length(), capacity() - length() }`. + absl::Span available(); + + // CordBuffer::available_up_to() + // + // Returns the span delineating the available capacity in this buffer limited + // to `size` bytes. This is equivalent to `available().subspan(0, size)`. + absl::Span available_up_to(size_t size); + + // CordBuffer::data() + // + // Returns a non-null reference to the data managed by this instance. + // Applications are allowed to write up to `capacity` bytes of instance data. + // CordBuffer data is uninitialized by default. Reading data from an instance + // that has not yet been initialized will lead to undefined behavior. + char* data(); + const char* data() const; + + // CordBuffer::length() + // + // Returns the length of this instance. The default length of a CordBuffer is + // 0, indicating an 'empty' CordBuffer. Applications must specify the length + // of the data in a CordBuffer before adding it to a Cord. + size_t length() const; + + // CordBuffer::capacity() + // + // Returns the capacity of this instance. All instances have a non-zero + // capacity: default and `moved from` instances have a small internal buffer. + size_t capacity() const; + + // CordBuffer::IncreaseLengthBy() + // + // Increases the length of this buffer by the specified 'n' bytes. 
+ // Applications must make sure all data in this buffer up to the new length + // has been initialized before adding a CordBuffer to a Cord: failure to do so + // will lead to undefined behavior. Requires `length() + n <= capacity()`. + // Typically, applications will use 'available_up_to()` to get a span of the + // desired capacity, and use `span.size()` to increase the length as in: + // absl::Span span = buffer.available_up_to(desired); + // buffer.IncreaseLengthBy(span.size()); + // memcpy(span.data(), src, span.size()); + // etc... + void IncreaseLengthBy(size_t n); + + // CordBuffer::SetLength() + // + // Sets the data length of this instance. Applications must make sure all data + // of the specified length has been initialized before adding a CordBuffer to + // a Cord: failure to do so will lead to undefined behavior. + // Setting the length to a small value or zero does not release any memory + // held by this CordBuffer instance. Requires `length <= capacity()`. + // Applications should preferably use the `IncreaseLengthBy()` method above + // in combination with the 'available()` or `available_up_to()` methods. + void SetLength(size_t length); + + private: + // Make sure we don't accidentally over promise. + static_assert(kCustomLimit <= cord_internal::kMaxLargeFlatSize, ""); + + // Assume the cost of an 'uprounded' allocation to CeilPow2(size) versus + // the cost of allocating at least 1 extra flat <= 4KB: + // - Flat overhead = 13 bytes + // - Btree amortized cost / node =~ 13 bytes + // - 64 byte granularity of tcmalloc at 4K =~ 32 byte average + // CPU cost and efficiency requires we should at least 'save' something by + // splitting, as a poor man's measure, we say the slop needs to be + // at least double the cost offset to make it worth splitting: ~128 bytes. + static constexpr size_t kMaxPageSlop = 128; + + // Overhead for allocation a flat. 
+ static constexpr size_t kOverhead = cord_internal::kFlatOverhead; + + using CordRepFlat = cord_internal::CordRepFlat; + + // `Rep` is the internal data representation of a CordBuffer. The internal + // representation has an internal small size optimization similar to + // std::string (SSO). + struct Rep + { + // Inline SSO size of a CordBuffer + static constexpr size_t kInlineCapacity = sizeof(intptr_t) * 2 - 1; + + // Creates a default instance with kInlineCapacity. + Rep() : + short_rep{} + { + } + + // Creates an instance managing an allocated non zero CordRep. + explicit Rep(cord_internal::CordRepFlat* rep) : + long_rep{rep} + { + assert(rep != nullptr); + } + + // Returns true if this instance manages the SSO internal buffer. + bool is_short() const + { + constexpr size_t offset = offsetof(Short, raw_size); + return (reinterpret_cast(this)[offset] & 1) != 0; + } + + // Returns the available area of the internal SSO data + absl::Span short_available() + { + const size_t length = short_length(); + return absl::Span(short_rep.data + length, kInlineCapacity - length); + } + + // Returns the available area of the internal SSO data + absl::Span long_available() const + { + assert(!is_short()); + const size_t length = long_rep.rep->length; + return absl::Span(long_rep.rep->Data() + length, long_rep.rep->Capacity() - length); + } + + // Returns the length of the internal SSO data. + size_t short_length() const + { + assert(is_short()); + return static_cast(short_rep.raw_size >> 1); + } + + // Sets the length of the internal SSO data. + // Disregards any previously set CordRep instance. + void set_short_length(size_t length) + { + short_rep.raw_size = static_cast((length << 1) + 1); + } + + // Adds `n` to the current short length. + void add_short_length(size_t n) + { + assert(is_short()); + short_rep.raw_size += static_cast(n << 1); + } + + // Returns reference to the internal SSO data buffer. 
+ char* data() + { + assert(is_short()); + return short_rep.data; + } + const char* data() const + { + assert(is_short()); + return short_rep.data; + } + + // Returns a pointer the external CordRep managed by this instance. + cord_internal::CordRepFlat* rep() const + { + assert(!is_short()); + return long_rep.rep; + } + + // The internal representation takes advantage of the fact that allocated + // memory is always on an even address, and uses the least significant bit + // of the first or last byte (depending on endianness) as the inline size + // indicator overlapping with the least significant byte of the CordRep*. +#if defined(ABSL_IS_BIG_ENDIAN) + struct Long + { + explicit Long(cord_internal::CordRepFlat* rep_arg) : + rep(rep_arg) + { + } + void* padding; + cord_internal::CordRepFlat* rep; + }; + struct Short + { + char data[sizeof(Long) - 1]; + char raw_size = 1; + }; +#else + struct Long + { + explicit Long(cord_internal::CordRepFlat* rep_arg) : + rep(rep_arg) + { + } + cord_internal::CordRepFlat* rep; + void* padding; + }; + struct Short + { + char raw_size = 1; + char data[sizeof(Long) - 1]; + }; +#endif + + union + { + Long long_rep; + Short short_rep; + }; + }; + + // Power2 functions + static bool IsPow2(size_t size) + { + return absl::has_single_bit(size); + } + static size_t Log2Floor(size_t size) + { + return static_cast(absl::bit_width(size) - 1); + } + static size_t Log2Ceil(size_t size) + { + return static_cast(absl::bit_width(size - 1)); + } + + // Implementation of `CreateWithCustomLimit()`. + // This implementation allows for future memory allocation hints to + // be passed down into the CordRepFlat allocation function. + template + static CordBuffer CreateWithCustomLimitImpl(size_t block_size, size_t capacity, AllocationHints... hints); + + // Consumes the value contained in this instance and resets the instance. 
+ // This method returns a non-null Cordrep* if the current instances manages a + // CordRep*, and resets the instance to an empty SSO instance. If the current + // instance is an SSO instance, then this method returns nullptr and sets + // `short_value` to the inlined data value. In either case, the current + // instance length is reset to zero. + // This method is intended to be used by Cord internal functions only. + cord_internal::CordRep* ConsumeValue(absl::string_view& short_value) + { + cord_internal::CordRep* rep = nullptr; + if (rep_.is_short()) + { + short_value = absl::string_view(rep_.data(), rep_.short_length()); + } + else + { + rep = rep_.rep(); + } + rep_.set_short_length(0); + return rep; + } + + // Internal constructor. + explicit CordBuffer(cord_internal::CordRepFlat* rep) : + rep_(rep) + { + assert(rep != nullptr); + } + + Rep rep_; + + friend class Cord; + friend class CordBufferTestPeer; + }; + + inline constexpr size_t CordBuffer::MaximumPayload() + { + return cord_internal::kMaxFlatLength; + } + + inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) + { + return (std::min)(kCustomLimit, block_size) - cord_internal::kFlatOverhead; + } + + inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) + { + if (capacity > Rep::kInlineCapacity) + { + auto* rep = cord_internal::CordRepFlat::New(capacity); + rep->length = 0; + return CordBuffer(rep); + } + return CordBuffer(); + } + + template + inline CordBuffer CordBuffer::CreateWithCustomLimitImpl( + size_t block_size, size_t capacity, AllocationHints... 
hints + ) + { + assert(IsPow2(block_size)); + capacity = (std::min)(capacity, kCustomLimit); + block_size = (std::min)(block_size, kCustomLimit); + if (capacity + kOverhead >= block_size) + { + capacity = block_size; + } + else if (capacity <= kDefaultLimit) + { + capacity = capacity + kOverhead; + } + else if (!IsPow2(capacity)) + { + // Check if rounded up to next power 2 is a good enough fit + // with limited waste making it an acceptable direct fit. + const size_t rounded_up = size_t{1} << Log2Ceil(capacity); + const size_t slop = rounded_up - capacity; + if (slop >= kOverhead && slop <= kMaxPageSlop + kOverhead) + { + capacity = rounded_up; + } + else + { + // Round down to highest power of 2 <= capacity. + // Consider a more aggressive step down if that may reduce the + // risk of fragmentation where 'people are holding it wrong'. + const size_t rounded_down = size_t{1} << Log2Floor(capacity); + capacity = rounded_down; + } + } + const size_t length = capacity - kOverhead; + auto* rep = CordRepFlat::New(CordRepFlat::Large(), length, hints...); + rep->length = 0; + return CordBuffer(rep); + } + + inline CordBuffer CordBuffer::CreateWithCustomLimit(size_t block_size, size_t capacity) + { + return CreateWithCustomLimitImpl(block_size, capacity); + } + + inline CordBuffer::~CordBuffer() + { + if (!rep_.is_short()) + { + cord_internal::CordRepFlat::Delete(rep_.rep()); + } + } + + inline CordBuffer::CordBuffer(CordBuffer&& rhs) noexcept : + rep_(rhs.rep_) + { + rhs.rep_.set_short_length(0); + } + + inline CordBuffer& CordBuffer::operator=(CordBuffer&& rhs) noexcept + { + if (!rep_.is_short()) + cord_internal::CordRepFlat::Delete(rep_.rep()); + rep_ = rhs.rep_; + rhs.rep_.set_short_length(0); + return *this; + } + + inline absl::Span CordBuffer::available() + { + return rep_.is_short() ? 
rep_.short_available() : rep_.long_available(); + } + + inline absl::Span CordBuffer::available_up_to(size_t size) + { + return available().subspan(0, size); + } + + inline char* CordBuffer::data() + { + return rep_.is_short() ? rep_.data() : rep_.rep()->Data(); + } + + inline const char* CordBuffer::data() const + { + return rep_.is_short() ? rep_.data() : rep_.rep()->Data(); + } + + inline size_t CordBuffer::capacity() const + { + return rep_.is_short() ? Rep::kInlineCapacity : rep_.rep()->Capacity(); + } + + inline size_t CordBuffer::length() const + { + return rep_.is_short() ? rep_.short_length() : rep_.rep()->length; + } + + inline void CordBuffer::SetLength(size_t length) + { + ABSL_HARDENING_ASSERT(length <= capacity()); + if (rep_.is_short()) + { + rep_.set_short_length(length); + } + else + { + rep_.rep()->length = length; + } + } + + inline void CordBuffer::IncreaseLengthBy(size_t n) + { + ABSL_HARDENING_ASSERT(n <= capacity() && length() + n <= capacity()); + if (rep_.is_short()) + { + rep_.add_short_length(n); + } + else + { + rep_.rep()->length += n; + } + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_CORD_BUFFER_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/cord_test_helpers.h b/CAPI/cpp/grpc/include/absl/strings/cord_test_helpers.h new file mode 100644 index 00000000..7c5d4b6f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/cord_test_helpers.h @@ -0,0 +1,135 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_STRINGS_CORD_TEST_HELPERS_H_ +#define ABSL_STRINGS_CORD_TEST_HELPERS_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/cord.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Cord sizes relevant for testing + enum class TestCordSize + { + // An empty value + kEmpty = 0, + + // An inlined string value + kInlined = cord_internal::kMaxInline / 2 + 1, + + // 'Well known' SSO lengths (excluding terminating zero). + // libstdcxx has a maximum SSO of 15, libc++ has a maximum SSO of 22. + kStringSso1 = 15, + kStringSso2 = 22, + + // A string value which is too large to fit in inlined data, but small enough + // such that Cord prefers copying the value if possible, i.e.: not stealing + // std::string inputs, or referencing existing CordReps on Append, etc. + kSmall = cord_internal::kMaxBytesToCopy / 2 + 1, + + // A string value large enough that Cord prefers to reference or steal from + // existing inputs rather than copying contents of the input. + kMedium = cord_internal::kMaxFlatLength / 2 + 1, + + // A string value large enough to cause it to be stored in multiple flats. 
+ kLarge = cord_internal::kMaxFlatLength * 4 + }; + + // To string helper + inline absl::string_view ToString(TestCordSize size) + { + switch (size) + { + case TestCordSize::kEmpty: + return "Empty"; + case TestCordSize::kInlined: + return "Inlined"; + case TestCordSize::kSmall: + return "Small"; + case TestCordSize::kStringSso1: + return "StringSso1"; + case TestCordSize::kStringSso2: + return "StringSso2"; + case TestCordSize::kMedium: + return "Medium"; + case TestCordSize::kLarge: + return "Large"; + } + return "???"; + } + + // Returns the length matching the specified size + inline size_t Length(TestCordSize size) + { + return static_cast(size); + } + + // Stream output helper + inline std::ostream& operator<<(std::ostream& stream, TestCordSize size) + { + return stream << ToString(size); + } + + // Creates a multi-segment Cord from an iterable container of strings. The + // resulting Cord is guaranteed to have one segment for every string in the + // container. This allows code to be unit tested with multi-segment Cord + // inputs. + // + // Example: + // + // absl::Cord c = absl::MakeFragmentedCord({"A ", "fragmented ", "Cord"}); + // EXPECT_FALSE(c.GetFlat(&unused)); + // + // The mechanism by which this Cord is created is an implementation detail. Any + // implementation that produces a multi-segment Cord may produce a flat Cord in + // the future as new optimizations are added to the Cord class. + // MakeFragmentedCord will, however, always be updated to return a multi-segment + // Cord. 
+ template + Cord MakeFragmentedCord(const Container& c) + { + Cord result; + for (const auto& s : c) + { + auto* external = new std::string(s); + Cord tmp = absl::MakeCordFromExternal( + *external, [external](absl::string_view) + { delete external; } + ); + tmp.Prepend(result); + result = tmp; + } + return result; + } + + inline Cord MakeFragmentedCord(std::initializer_list list) + { + return MakeFragmentedCord>(list); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_CORD_TEST_HELPERS_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/cordz_test_helpers.h b/CAPI/cpp/grpc/include/absl/strings/cordz_test_helpers.h new file mode 100644 index 00000000..e0163c63 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/cordz_test_helpers.h @@ -0,0 +1,174 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_CORDZ_TEST_HELPERS_H_ +#define ABSL_STRINGS_CORDZ_TEST_HELPERS_H_ + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/strings/cord.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cordz_info.h" +#include "absl/strings/internal/cordz_sample_token.h" +#include "absl/strings/internal/cordz_statistics.h" +#include "absl/strings/internal/cordz_update_tracker.h" +#include "absl/strings/str_cat.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Returns the CordzInfo for the cord, or nullptr if the cord is not sampled. + inline const cord_internal::CordzInfo* GetCordzInfoForTesting( + const Cord& cord + ) + { + if (!cord.contents_.is_tree()) + return nullptr; + return cord.contents_.cordz_info(); + } + + // Returns true if the provided cordz_info is in the list of sampled cords. + inline bool CordzInfoIsListed(const cord_internal::CordzInfo* cordz_info, cord_internal::CordzSampleToken token = {}) + { + for (const cord_internal::CordzInfo& info : token) + { + if (cordz_info == &info) + return true; + } + return false; + } + + // Matcher on Cord that verifies all of: + // - the cord is sampled + // - the CordzInfo of the cord is listed / discoverable. 
+ // - the reported CordzStatistics match the cord's actual properties + // - the cord has an (initial) UpdateTracker count of 1 for `method` + MATCHER_P(HasValidCordzInfoOf, method, "CordzInfo matches cord") + { + const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg); + if (cord_info == nullptr) + { + *result_listener << "cord is not sampled"; + return false; + } + if (!CordzInfoIsListed(cord_info)) + { + *result_listener << "cord is sampled, but not listed"; + return false; + } + cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics(); + if (stat.size != arg.size()) + { + *result_listener << "cordz size " << stat.size + << " does not match cord size " << arg.size(); + return false; + } + if (stat.update_tracker.Value(method) != 1) + { + *result_listener << "Expected method count 1 for " << method << ", found " + << stat.update_tracker.Value(method); + return false; + } + return true; + } + + // Matcher on Cord that verifies that the cord is sampled and that the CordzInfo + // update tracker has 'method' with a call count of 'n' + MATCHER_P2(CordzMethodCountEq, method, n, absl::StrCat("CordzInfo method count equals ", n)) + { + const cord_internal::CordzInfo* cord_info = GetCordzInfoForTesting(arg); + if (cord_info == nullptr) + { + *result_listener << "cord is not sampled"; + return false; + } + cord_internal::CordzStatistics stat = cord_info->GetCordzStatistics(); + if (stat.update_tracker.Value(method) != n) + { + *result_listener << "Expected method count " << n << " for " << method + << ", found " << stat.update_tracker.Value(method); + return false; + } + return true; + } + + // Cordz will only update with a new rate once the previously scheduled event + // has fired. When we disable Cordz, a long delay takes place where we won't + // consider profiling new Cords. CordzSampleIntervalHelper will burn through + // that interval and allow for testing that assumes that the average sampling + // interval is a particular value. 
+ class CordzSamplingIntervalHelper + { + public: + explicit CordzSamplingIntervalHelper(int32_t interval) : + orig_mean_interval_(absl::cord_internal::get_cordz_mean_interval()) + { + absl::cord_internal::set_cordz_mean_interval(interval); + absl::cord_internal::cordz_set_next_sample_for_testing(interval); + } + + ~CordzSamplingIntervalHelper() + { + absl::cord_internal::set_cordz_mean_interval(orig_mean_interval_); + absl::cord_internal::cordz_set_next_sample_for_testing(orig_mean_interval_); + } + + private: + int32_t orig_mean_interval_; + }; + + // Wrapper struct managing a small CordRep `rep` + struct TestCordRep + { + cord_internal::CordRepFlat* rep; + + TestCordRep() + { + rep = cord_internal::CordRepFlat::New(100); + rep->length = 100; + memset(rep->Data(), 1, 100); + } + ~TestCordRep() + { + cord_internal::CordRep::Unref(rep); + } + }; + + // Wrapper struct managing a small CordRep `rep`, and + // an InlineData `data` initialized with that CordRep. + struct TestCordData + { + TestCordRep rep; + cord_internal::InlineData data{rep.rep}; + }; + + // Creates a Cord that is not sampled + template + Cord UnsampledCord(Args... args) + { + CordzSamplingIntervalHelper never(9999); + Cord cord(std::forward(args)...); + ABSL_ASSERT(GetCordzInfoForTesting(cord) == nullptr); + return cord; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_CORDZ_TEST_HELPERS_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/escaping.h b/CAPI/cpp/grpc/include/absl/strings/escaping.h new file mode 100644 index 00000000..319ad982 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/escaping.h @@ -0,0 +1,171 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: escaping.h +// ----------------------------------------------------------------------------- +// +// This header file contains string utilities involved in escaping and +// unescaping strings in various ways. + +#ifndef ABSL_STRINGS_ESCAPING_H_ +#define ABSL_STRINGS_ESCAPING_H_ + +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/strings/ascii.h" +#include "absl/strings/str_join.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // CUnescape() + // + // Unescapes a `source` string and copies it into `dest`, rewriting C-style + // escape sequences (https://en.cppreference.com/w/cpp/language/escape) into + // their proper code point equivalents, returning `true` if successful. + // + // The following unescape sequences can be handled: + // + // * ASCII escape sequences ('\n','\r','\\', etc.) to their ASCII equivalents + // * Octal escape sequences ('\nnn') to byte nnn. The unescaped value must + // resolve to a single byte or an error will occur. E.g. values greater than + // 0xff will produce an error. + // * Hexadecimal escape sequences ('\xnn') to byte nn. While an arbitrary + // number of following digits are allowed, the unescaped value must resolve + // to a single byte or an error will occur. E.g. '\x0045' is equivalent to + // '\x45', but '\x1234' will produce an error. 
+ // * Unicode escape sequences ('\unnnn' for exactly four hex digits or + // '\Unnnnnnnn' for exactly eight hex digits, which will be encoded in + // UTF-8. (E.g., `\u2019` unescapes to the three bytes 0xE2, 0x80, and + // 0x99). + // + // If any errors are encountered, this function returns `false`, leaving the + // `dest` output parameter in an unspecified state, and stores the first + // encountered error in `error`. To disable error reporting, set `error` to + // `nullptr` or use the overload with no error reporting below. + // + // Example: + // + // std::string s = "foo\\rbar\\nbaz\\t"; + // std::string unescaped_s; + // if (!absl::CUnescape(s, &unescaped_s) { + // ... + // } + // EXPECT_EQ(unescaped_s, "foo\rbar\nbaz\t"); + bool CUnescape(absl::string_view source, std::string* dest, std::string* error); + + // Overload of `CUnescape()` with no error reporting. + inline bool CUnescape(absl::string_view source, std::string* dest) + { + return CUnescape(source, dest, nullptr); + } + + // CEscape() + // + // Escapes a 'src' string using C-style escapes sequences + // (https://en.cppreference.com/w/cpp/language/escape), escaping other + // non-printable/non-whitespace bytes as octal sequences (e.g. "\377"). + // + // Example: + // + // std::string s = "foo\rbar\tbaz\010\011\012\013\014\x0d\n"; + // std::string escaped_s = absl::CEscape(s); + // EXPECT_EQ(escaped_s, "foo\\rbar\\tbaz\\010\\t\\n\\013\\014\\r\\n"); + std::string CEscape(absl::string_view src); + + // CHexEscape() + // + // Escapes a 'src' string using C-style escape sequences, escaping + // other non-printable/non-whitespace bytes as hexadecimal sequences (e.g. + // "\xFF"). 
+ // + // Example: + // + // std::string s = "foo\rbar\tbaz\010\011\012\013\014\x0d\n"; + // std::string escaped_s = absl::CHexEscape(s); + // EXPECT_EQ(escaped_s, "foo\\rbar\\tbaz\\x08\\t\\n\\x0b\\x0c\\r\\n"); + std::string CHexEscape(absl::string_view src); + + // Utf8SafeCEscape() + // + // Escapes a 'src' string using C-style escape sequences, escaping bytes as + // octal sequences, and passing through UTF-8 characters without conversion. + // I.e., when encountering any bytes with their high bit set, this function + // will not escape those values, whether or not they are valid UTF-8. + std::string Utf8SafeCEscape(absl::string_view src); + + // Utf8SafeCHexEscape() + // + // Escapes a 'src' string using C-style escape sequences, escaping bytes as + // hexadecimal sequences, and passing through UTF-8 characters without + // conversion. + std::string Utf8SafeCHexEscape(absl::string_view src); + + // Base64Escape() + // + // Encodes a `src` string into a base64-encoded 'dest' string with padding + // characters. This function conforms with RFC 4648 section 4 (base64) and RFC + // 2045. + void Base64Escape(absl::string_view src, std::string* dest); + std::string Base64Escape(absl::string_view src); + + // WebSafeBase64Escape() + // + // Encodes a `src` string into a base64 string, like Base64Escape() does, but + // outputs '-' instead of '+' and '_' instead of '/', and does not pad 'dest'. + // This function conforms with RFC 4648 section 5 (base64url). + void WebSafeBase64Escape(absl::string_view src, std::string* dest); + std::string WebSafeBase64Escape(absl::string_view src); + + // Base64Unescape() + // + // Converts a `src` string encoded in Base64 (RFC 4648 section 4) to its binary + // equivalent, writing it to a `dest` buffer, returning `true` on success. If + // `src` contains invalid characters, `dest` is cleared and returns `false`. + // If padding is included (note that `Base64Escape()` does produce it), it must + // be correct. 
In the padding, '=' and '.' are treated identically. + bool Base64Unescape(absl::string_view src, std::string* dest); + + // WebSafeBase64Unescape() + // + // Converts a `src` string encoded in "web safe" Base64 (RFC 4648 section 5) to + // its binary equivalent, writing it to a `dest` buffer. If `src` contains + // invalid characters, `dest` is cleared and returns `false`. If padding is + // included (note that `WebSafeBase64Escape()` does not produce it), it must be + // correct. In the padding, '=' and '.' are treated identically. + bool WebSafeBase64Unescape(absl::string_view src, std::string* dest); + + // HexStringToBytes() + // + // Converts an ASCII hex string into bytes, returning binary data of length + // `from.size()/2`. + std::string HexStringToBytes(absl::string_view from); + + // BytesToHexString() + // + // Converts binary data into an ASCII text string, returning a string of size + // `2*from.size()`. + std::string BytesToHexString(absl::string_view from); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_ESCAPING_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/char_map.h b/CAPI/cpp/grpc/include/absl/strings/internal/char_map.h new file mode 100644 index 00000000..b633f808 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/char_map.h @@ -0,0 +1,212 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Character Map Class +// +// A fast, bit-vector map for 8-bit unsigned characters. +// This class is useful for non-character purposes as well. + +#ifndef ABSL_STRINGS_INTERNAL_CHAR_MAP_H_ +#define ABSL_STRINGS_INTERNAL_CHAR_MAP_H_ + +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/base/port.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + class Charmap + { + public: + constexpr Charmap() : + m_() + { + } + + // Initializes with a given char*. Note that NUL is not treated as + // a terminator, but rather a char to be flicked. + Charmap(const char* str, int len) : + m_() + { + while (len--) + SetChar(*str++); + } + + // Initializes with a given char*. NUL is treated as a terminator + // and will not be in the charmap. + explicit Charmap(const char* str) : + m_() + { + while (*str) + SetChar(*str++); + } + + constexpr bool contains(unsigned char c) const + { + return (m_[c / 64] >> (c % 64)) & 0x1; + } + + // Returns true if and only if a character exists in both maps. + bool IntersectsWith(const Charmap& c) const + { + for (size_t i = 0; i < ABSL_ARRAYSIZE(m_); ++i) + { + if ((m_[i] & c.m_[i]) != 0) + return true; + } + return false; + } + + bool IsZero() const + { + for (uint64_t c : m_) + { + if (c != 0) + return false; + } + return true; + } + + // Containing only a single specified char. + static constexpr Charmap Char(char x) + { + return Charmap(CharMaskForWord(x, 0), CharMaskForWord(x, 1), CharMaskForWord(x, 2), CharMaskForWord(x, 3)); + } + + // Containing all the chars in the C-string 's'. + static constexpr Charmap FromString(const char* s) + { + Charmap ret; + while (*s) + ret = ret | Char(*s++); + return ret; + } + + // Containing all the chars in the closed interval [lo,hi]. 
+ static constexpr Charmap Range(char lo, char hi) + { + return Charmap(RangeForWord(lo, hi, 0), RangeForWord(lo, hi, 1), RangeForWord(lo, hi, 2), RangeForWord(lo, hi, 3)); + } + + friend constexpr Charmap operator&(const Charmap& a, const Charmap& b) + { + return Charmap(a.m_[0] & b.m_[0], a.m_[1] & b.m_[1], a.m_[2] & b.m_[2], a.m_[3] & b.m_[3]); + } + + friend constexpr Charmap operator|(const Charmap& a, const Charmap& b) + { + return Charmap(a.m_[0] | b.m_[0], a.m_[1] | b.m_[1], a.m_[2] | b.m_[2], a.m_[3] | b.m_[3]); + } + + friend constexpr Charmap operator~(const Charmap& a) + { + return Charmap(~a.m_[0], ~a.m_[1], ~a.m_[2], ~a.m_[3]); + } + + private: + constexpr Charmap(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3) : + m_{b0, b1, b2, b3} + { + } + + static constexpr uint64_t RangeForWord(char lo, char hi, uint64_t word) + { + return OpenRangeFromZeroForWord(static_cast(hi) + 1, word) & + ~OpenRangeFromZeroForWord(static_cast(lo), word); + } + + // All the chars in the specified word of the range [0, upper). + static constexpr uint64_t OpenRangeFromZeroForWord(uint64_t upper, uint64_t word) + { + return (upper <= 64 * word) ? 0 : (upper >= 64 * (word + 1)) ? ~static_cast(0) : + (~static_cast(0) >> (64 - upper % 64)); + } + + static constexpr uint64_t CharMaskForWord(char x, uint64_t word) + { + const auto unsigned_x = static_cast(x); + return (unsigned_x / 64 == word) ? 
(static_cast(1) << (unsigned_x % 64)) : 0; + } + + void SetChar(char c) + { + const auto unsigned_c = static_cast(c); + m_[unsigned_c / 64] |= static_cast(1) << (unsigned_c % 64); + } + + uint64_t m_[4]; + }; + + // Mirror the char-classifying predicates in + constexpr Charmap UpperCharmap() + { + return Charmap::Range('A', 'Z'); + } + constexpr Charmap LowerCharmap() + { + return Charmap::Range('a', 'z'); + } + constexpr Charmap DigitCharmap() + { + return Charmap::Range('0', '9'); + } + constexpr Charmap AlphaCharmap() + { + return LowerCharmap() | UpperCharmap(); + } + constexpr Charmap AlnumCharmap() + { + return DigitCharmap() | AlphaCharmap(); + } + constexpr Charmap XDigitCharmap() + { + return DigitCharmap() | Charmap::Range('A', 'F') | Charmap::Range('a', 'f'); + } + constexpr Charmap PrintCharmap() + { + return Charmap::Range(0x20, 0x7e); + } + constexpr Charmap SpaceCharmap() + { + return Charmap::FromString("\t\n\v\f\r "); + } + constexpr Charmap CntrlCharmap() + { + return Charmap::Range(0, 0x7f) & ~PrintCharmap(); + } + constexpr Charmap BlankCharmap() + { + return Charmap::FromString("\t "); + } + constexpr Charmap GraphCharmap() + { + return PrintCharmap() & ~SpaceCharmap(); + } + constexpr Charmap PunctCharmap() + { + return GraphCharmap() & ~AlnumCharmap(); + } + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CHAR_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/charconv_bigint.h b/CAPI/cpp/grpc/include/absl/strings/internal/charconv_bigint.h new file mode 100644 index 00000000..45339a1a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/charconv_bigint.h @@ -0,0 +1,501 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CHARCONV_BIGINT_H_ +#define ABSL_STRINGS_INTERNAL_CHARCONV_BIGINT_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/ascii.h" +#include "absl/strings/internal/charconv_parse.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // The largest power that 5 that can be raised to, and still fit in a uint32_t. + constexpr int kMaxSmallPowerOfFive = 13; + // The largest power that 10 that can be raised to, and still fit in a uint32_t. + constexpr int kMaxSmallPowerOfTen = 9; + + ABSL_DLL extern const uint32_t + kFiveToNth[kMaxSmallPowerOfFive + 1]; + ABSL_DLL extern const uint32_t kTenToNth[kMaxSmallPowerOfTen + 1]; + + // Large, fixed-width unsigned integer. + // + // Exact rounding for decimal-to-binary floating point conversion requires very + // large integer math, but a design goal of absl::from_chars is to avoid + // allocating memory. The integer precision needed for decimal-to-binary + // conversions is large but bounded, so a huge fixed-width integer class + // suffices. + // + // This is an intentionally limited big integer class. Only needed operations + // are implemented. All storage lives in an array data member, and all + // arithmetic is done in-place, to avoid requiring separate storage for operand + // and result. + // + // This is an internal class. Some methods live in the .cc file, and are + // instantiated only for the values of max_words we need. 
+ template + class BigUnsigned + { + public: + static_assert(max_words == 4 || max_words == 84, "unsupported max_words value"); + + BigUnsigned() : + size_(0), + words_{} + { + } + explicit constexpr BigUnsigned(uint64_t v) : + size_((v >> 32) ? 2 : v ? 1 : + 0), + words_{static_cast(v & 0xffffffffu), static_cast(v >> 32)} + { + } + + // Constructs a BigUnsigned from the given string_view containing a decimal + // value. If the input string is not a decimal integer, constructs a 0 + // instead. + explicit BigUnsigned(absl::string_view sv) : + size_(0), + words_{} + { + // Check for valid input, returning a 0 otherwise. This is reasonable + // behavior only because this constructor is for unit tests. + if (std::find_if_not(sv.begin(), sv.end(), ascii_isdigit) != sv.end() || + sv.empty()) + { + return; + } + int exponent_adjust = + ReadDigits(sv.data(), sv.data() + sv.size(), Digits10() + 1); + if (exponent_adjust > 0) + { + MultiplyByTenToTheNth(exponent_adjust); + } + } + + // Loads the mantissa value of a previously-parsed float. + // + // Returns the associated decimal exponent. The value of the parsed float is + // exactly *this * 10**exponent. + int ReadFloatMantissa(const ParsedFloat& fp, int significant_digits); + + // Returns the number of decimal digits of precision this type provides. All + // numbers with this many decimal digits or fewer are representable by this + // type. + // + // Analogous to std::numeric_limits::digits10. + static constexpr int Digits10() + { + // 9975007/1035508 is very slightly less than log10(2**32). + return static_cast(max_words) * 9975007 / 1035508; + } + + // Shifts left by the given number of bits. 
+ void ShiftLeft(int count) + { + if (count > 0) + { + const int word_shift = count / 32; + if (word_shift >= max_words) + { + SetToZero(); + return; + } + size_ = (std::min)(size_ + word_shift, max_words); + count %= 32; + if (count == 0) + { + std::copy_backward(words_, words_ + size_ - word_shift, words_ + size_); + } + else + { + for (int i = (std::min)(size_, max_words - 1); i > word_shift; --i) + { + words_[i] = (words_[i - word_shift] << count) | + (words_[i - word_shift - 1] >> (32 - count)); + } + words_[word_shift] = words_[0] << count; + // Grow size_ if necessary. + if (size_ < max_words && words_[size_]) + { + ++size_; + } + } + std::fill_n(words_, word_shift, 0u); + } + } + + // Multiplies by v in-place. + void MultiplyBy(uint32_t v) + { + if (size_ == 0 || v == 1) + { + return; + } + if (v == 0) + { + SetToZero(); + return; + } + const uint64_t factor = v; + uint64_t window = 0; + for (int i = 0; i < size_; ++i) + { + window += factor * words_[i]; + words_[i] = window & 0xffffffff; + window >>= 32; + } + // If carry bits remain and there's space for them, grow size_. + if (window && size_ < max_words) + { + words_[size_] = window & 0xffffffff; + ++size_; + } + } + + void MultiplyBy(uint64_t v) + { + uint32_t words[2]; + words[0] = static_cast(v); + words[1] = static_cast(v >> 32); + if (words[1] == 0) + { + MultiplyBy(words[0]); + } + else + { + MultiplyBy(2, words); + } + } + + // Multiplies in place by 5 to the power of n. n must be non-negative. + void MultiplyByFiveToTheNth(int n) + { + while (n >= kMaxSmallPowerOfFive) + { + MultiplyBy(kFiveToNth[kMaxSmallPowerOfFive]); + n -= kMaxSmallPowerOfFive; + } + if (n > 0) + { + MultiplyBy(kFiveToNth[n]); + } + } + + // Multiplies in place by 10 to the power of n. n must be non-negative. + void MultiplyByTenToTheNth(int n) + { + if (n > kMaxSmallPowerOfTen) + { + // For large n, raise to a power of 5, then shift left by the same amount. + // (10**n == 5**n * 2**n.) 
This requires fewer multiplications overall. + MultiplyByFiveToTheNth(n); + ShiftLeft(n); + } + else if (n > 0) + { + // We can do this more quickly for very small N by using a single + // multiplication. + MultiplyBy(kTenToNth[n]); + } + } + + // Returns the value of 5**n, for non-negative n. This implementation uses + // a lookup table, and is faster then seeding a BigUnsigned with 1 and calling + // MultiplyByFiveToTheNth(). + static BigUnsigned FiveToTheNth(int n); + + // Multiplies by another BigUnsigned, in-place. + template + void MultiplyBy(const BigUnsigned& other) + { + MultiplyBy(other.size(), other.words()); + } + + void SetToZero() + { + std::fill_n(words_, size_, 0u); + size_ = 0; + } + + // Returns the value of the nth word of this BigUnsigned. This is + // range-checked, and returns 0 on out-of-bounds accesses. + uint32_t GetWord(int index) const + { + if (index < 0 || index >= size_) + { + return 0; + } + return words_[index]; + } + + // Returns this integer as a decimal string. This is not used in the decimal- + // to-binary conversion; it is intended to aid in testing. + std::string ToString() const; + + int size() const + { + return size_; + } + const uint32_t* words() const + { + return words_; + } + + private: + // Reads the number between [begin, end), possibly containing a decimal point, + // into this BigUnsigned. + // + // Callers are required to ensure [begin, end) contains a valid number, with + // one or more decimal digits and at most one decimal point. This routine + // will behave unpredictably if these preconditions are not met. + // + // Only the first `significant_digits` digits are read. Digits beyond this + // limit are "sticky": If the final significant digit is 0 or 5, and if any + // dropped digit is nonzero, then that final significant digit is adjusted up + // to 1 or 6. This adjustment allows for precise rounding. 
+ // + // Returns `exponent_adjustment`, a power-of-ten exponent adjustment to + // account for the decimal point and for dropped significant digits. After + // this function returns, + // actual_value_of_parsed_string ~= *this * 10**exponent_adjustment. + int ReadDigits(const char* begin, const char* end, int significant_digits); + + // Performs a step of big integer multiplication. This computes the full + // (64-bit-wide) values that should be added at the given index (step), and + // adds to that location in-place. + // + // Because our math all occurs in place, we must multiply starting from the + // highest word working downward. (This is a bit more expensive due to the + // extra carries involved.) + // + // This must be called in steps, for each word to be calculated, starting from + // the high end and working down to 0. The first value of `step` should be + // `std::min(original_size + other.size_ - 2, max_words - 1)`. + // The reason for this expression is that multiplying the i'th word from one + // multiplicand and the j'th word of another multiplicand creates a + // two-word-wide value to be stored at the (i+j)'th element. The highest + // word indices we will access are `original_size - 1` from this object, and + // `other.size_ - 1` from our operand. Therefore, + // `original_size + other.size_ - 2` is the first step we should calculate, + // but limited on an upper bound by max_words. + + // Working from high-to-low ensures that we do not overwrite the portions of + // the initial value of *this which are still needed for later steps. + // + // Once called with step == 0, *this contains the result of the + // multiplication. + // + // `original_size` is the size_ of *this before the first call to + // MultiplyStep(). `other_words` and `other_size` are the contents of our + // operand. `step` is the step to perform, as described above. 
+ void MultiplyStep(int original_size, const uint32_t* other_words, int other_size, int step); + + void MultiplyBy(int other_size, const uint32_t* other_words) + { + const int original_size = size_; + const int first_step = + (std::min)(original_size + other_size - 2, max_words - 1); + for (int step = first_step; step >= 0; --step) + { + MultiplyStep(original_size, other_words, other_size, step); + } + } + + // Adds a 32-bit value to the index'th word, with carry. + void AddWithCarry(int index, uint32_t value) + { + if (value) + { + while (index < max_words && value > 0) + { + words_[index] += value; + // carry if we overflowed in this word: + if (value > words_[index]) + { + value = 1; + ++index; + } + else + { + value = 0; + } + } + size_ = (std::min)(max_words, (std::max)(index + 1, size_)); + } + } + + void AddWithCarry(int index, uint64_t value) + { + if (value && index < max_words) + { + uint32_t high = value >> 32; + uint32_t low = value & 0xffffffff; + words_[index] += low; + if (words_[index] < low) + { + ++high; + if (high == 0) + { + // Carry from the low word caused our high word to overflow. + // Short circuit here to do the right thing. + AddWithCarry(index + 2, static_cast(1)); + return; + } + } + if (high > 0) + { + AddWithCarry(index + 1, high); + } + else + { + // Normally 32-bit AddWithCarry() sets size_, but since we don't call + // it when `high` is 0, do it ourselves here. + size_ = (std::min)(max_words, (std::max)(index + 1, size_)); + } + } + } + + // Divide this in place by a constant divisor. Returns the remainder of the + // division. 
+ template + uint32_t DivMod() + { + uint64_t accumulator = 0; + for (int i = size_ - 1; i >= 0; --i) + { + accumulator <<= 32; + accumulator += words_[i]; + // accumulator / divisor will never overflow an int32_t in this loop + words_[i] = static_cast(accumulator / divisor); + accumulator = accumulator % divisor; + } + while (size_ > 0 && words_[size_ - 1] == 0) + { + --size_; + } + return static_cast(accumulator); + } + + // The number of elements in words_ that may carry significant values. + // All elements beyond this point are 0. + // + // When size_ is 0, this BigUnsigned stores the value 0. + // When size_ is nonzero, is *not* guaranteed that words_[size_ - 1] is + // nonzero. This can occur due to overflow truncation. + // In particular, x.size_ != y.size_ does *not* imply x != y. + int size_; + uint32_t words_[max_words]; + }; + + // Compares two big integer instances. + // + // Returns -1 if lhs < rhs, 0 if lhs == rhs, and 1 if lhs > rhs. + template + int Compare(const BigUnsigned& lhs, const BigUnsigned& rhs) + { + int limit = (std::max)(lhs.size(), rhs.size()); + for (int i = limit - 1; i >= 0; --i) + { + const uint32_t lhs_word = lhs.GetWord(i); + const uint32_t rhs_word = rhs.GetWord(i); + if (lhs_word < rhs_word) + { + return -1; + } + else if (lhs_word > rhs_word) + { + return 1; + } + } + return 0; + } + + template + bool operator==(const BigUnsigned& lhs, const BigUnsigned& rhs) + { + int limit = (std::max)(lhs.size(), rhs.size()); + for (int i = 0; i < limit; ++i) + { + if (lhs.GetWord(i) != rhs.GetWord(i)) + { + return false; + } + } + return true; + } + + template + bool operator!=(const BigUnsigned& lhs, const BigUnsigned& rhs) + { + return !(lhs == rhs); + } + + template + bool operator<(const BigUnsigned& lhs, const BigUnsigned& rhs) + { + return Compare(lhs, rhs) == -1; + } + + template + bool operator>(const BigUnsigned& lhs, const BigUnsigned& rhs) + { + return rhs < lhs; + } + template + bool operator<=(const BigUnsigned& lhs, const 
BigUnsigned& rhs) + { + return !(rhs < lhs); + } + template + bool operator>=(const BigUnsigned& lhs, const BigUnsigned& rhs) + { + return !(lhs < rhs); + } + + // Output operator for BigUnsigned, for testing purposes only. + template + std::ostream& operator<<(std::ostream& os, const BigUnsigned& num) + { + return os << num.ToString(); + } + + // Explicit instantiation declarations for the sizes of BigUnsigned that we + // are using. + // + // For now, the choices of 4 and 84 are arbitrary; 4 is a small value that is + // still bigger than an int128, and 84 is a large value we will want to use + // in the from_chars implementation. + // + // Comments justifying the use of 84 belong in the from_chars implementation, + // and will be added in a follow-up CL. + extern template class BigUnsigned<4>; + extern template class BigUnsigned<84>; + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CHARCONV_BIGINT_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/charconv_parse.h b/CAPI/cpp/grpc/include/absl/strings/internal/charconv_parse.h new file mode 100644 index 00000000..3452359e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/charconv_parse.h @@ -0,0 +1,104 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_CHARCONV_PARSE_H_ +#define ABSL_STRINGS_INTERNAL_CHARCONV_PARSE_H_ + +#include + +#include "absl/base/config.h" +#include "absl/strings/charconv.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // Enum indicating whether a parsed float is a number or special value. + enum class FloatType + { + kNumber, + kInfinity, + kNan + }; + + // The decomposed parts of a parsed `float` or `double`. + struct ParsedFloat + { + // Representation of the parsed mantissa, with the decimal point adjusted to + // make it an integer. + // + // During decimal scanning, this contains 19 significant digits worth of + // mantissa value. If digits beyond this point are found, they + // are truncated, and if any of these dropped digits are nonzero, then + // `mantissa` is inexact, and the full mantissa is stored in [subrange_begin, + // subrange_end). + // + // During hexadecimal scanning, this contains 15 significant hex digits worth + // of mantissa value. Digits beyond this point are sticky -- they are + // truncated, but if any dropped digits are nonzero, the low bit of mantissa + // will be set. (This allows for precise rounding, and avoids the need + // to store the full mantissa in [subrange_begin, subrange_end).) + uint64_t mantissa = 0; + + // Floating point expontent. This reflects any decimal point adjustments and + // any truncated digits from the mantissa. The absolute value of the parsed + // number is represented by mantissa * (base ** exponent), where base==10 for + // decimal floats, and base==2 for hexadecimal floats. + int exponent = 0; + + // The literal exponent value scanned from the input, or 0 if none was + // present. This does not reflect any adjustments applied to mantissa. + int literal_exponent = 0; + + // The type of number scanned. + FloatType type = FloatType::kNumber; + + // When non-null, [subrange_begin, subrange_end) marks a range of characters + // that require further processing. 
The meaning is dependent on float type. + // If type == kNumber and this is set, this is a "wide input": the input + // mantissa contained more than 19 digits. The range contains the full + // mantissa. It plus `literal_exponent` need to be examined to find the best + // floating point match. + // If type == kNan and this is set, the range marks the contents of a + // matched parenthesized character region after the NaN. + const char* subrange_begin = nullptr; + const char* subrange_end = nullptr; + + // One-past-the-end of the successfully parsed region, or nullptr if no + // matching pattern was found. + const char* end = nullptr; + }; + + // Read the floating point number in the provided range, and populate + // ParsedFloat accordingly. + // + // format_flags is a bitmask value specifying what patterns this API will match. + // `scientific` and `fixed` are honored per std::from_chars rules + // ([utility.from.chars], C++17): if exactly one of these bits is set, then an + // exponent is required, or dislallowed, respectively. + // + // Template parameter `base` must be either 10 or 16. For base 16, a "0x" is + // *not* consumed. The `hex` bit from format_flags is ignored by ParseFloat. 
+ template + ParsedFloat ParseFloat(const char* begin, const char* end, absl::chars_format format_flags); + + extern template ParsedFloat ParseFloat<10>(const char* begin, const char* end, absl::chars_format format_flags); + extern template ParsedFloat ParseFloat<16>(const char* begin, const char* end, absl::chars_format format_flags); + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl +#endif // ABSL_STRINGS_INTERNAL_CHARCONV_PARSE_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_data_edge.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_data_edge.h new file mode 100644 index 00000000..f16a883c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_data_edge.h @@ -0,0 +1,68 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_ +#define ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_flat.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node + // holding a FLAT or EXTERNAL child rep. Requires `rep != nullptr`. 
+ inline bool IsDataEdge(const CordRep* edge) + { + assert(edge != nullptr); + + // The fast path is that `edge` is an EXTERNAL or FLAT node, making the below + // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL + // check in the slow path of the SUBSTRING check to optimize for the hot path. + if (edge->tag == EXTERNAL || edge->tag >= FLAT) + return true; + if (edge->tag == SUBSTRING) + edge = edge->substring()->child; + return edge->tag == EXTERNAL || edge->tag >= FLAT; + } + + // Returns the `absl::string_view` data reference for the provided data edge. + // Requires 'IsDataEdge(edge) == true`. + inline absl::string_view EdgeData(const CordRep* edge) + { + assert(IsDataEdge(edge)); + + size_t offset = 0; + const size_t length = edge->length; + if (edge->IsSubstring()) + { + offset = edge->substring()->start; + edge = edge->substring()->child; + } + return edge->tag >= FLAT ? absl::string_view{edge->flat()->Data() + offset, length} : absl::string_view{edge->external()->base + offset, length}; + } + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_internal.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_internal.h new file mode 100644 index 00000000..4f069559 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_internal.h @@ -0,0 +1,1107 @@ +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_ +#define ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/base/internal/invoke.h" +#include "absl/base/optimization.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +// We can only add poisoning if we can detect consteval executions. +#if defined(ABSL_HAVE_CONSTANT_EVALUATED) && \ + (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_MEMORY_SANITIZER)) +#define ABSL_INTERNAL_CORD_HAVE_SANITIZER 1 +#endif + +#define ABSL_CORD_INTERNAL_NO_SANITIZE \ + ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // The overhead of a vtable is too much for Cord, so we roll our own subclasses + // using only a single byte to differentiate classes from each other - the "tag" + // byte. Define the subclasses first so we can provide downcasting helper + // functions in the base class. 
+ struct CordRep; + struct CordRepConcat; + struct CordRepExternal; + struct CordRepFlat; + struct CordRepSubstring; + struct CordRepCrc; + class CordRepRing; + class CordRepBtree; + + class CordzInfo; + + // Default feature enable states for cord ring buffers + enum CordFeatureDefaults + { + kCordEnableRingBufferDefault = false, + kCordShallowSubcordsDefault = false + }; + + extern std::atomic cord_ring_buffer_enabled; + extern std::atomic shallow_subcords_enabled; + + inline void enable_cord_ring_buffer(bool enable) + { + cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed); + } + + inline void enable_shallow_subcords(bool enable) + { + shallow_subcords_enabled.store(enable, std::memory_order_relaxed); + } + + enum Constants + { + // The inlined size to use with absl::InlinedVector. + // + // Note: The InlinedVectors in this file (and in cord.h) do not need to use + // the same value for their inlined size. The fact that they do is historical. + // It may be desirable for each to use a different inlined size optimized for + // that InlinedVector's usage. + // + // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for + // the inlined vector size (47 exists for backward compatibility). + kInlinedVectorSize = 47, + + // Prefer copying blocks of at most this size, otherwise reference count. + kMaxBytesToCopy = 511 + }; + + // Emits a fatal error "Unexpected node type: xyz" and aborts the program. + ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep); + + // Fast implementation of memmove for up to 15 bytes. This implementation is + // safe for overlapping regions. If nullify_tail is true, the destination is + // padded with '\0' up to 15 bytes. 
+ template + inline void SmallMemmove(char* dst, const char* src, size_t n) + { + if (n >= 8) + { + assert(n <= 15); + uint64_t buf1; + uint64_t buf2; + memcpy(&buf1, src, 8); + memcpy(&buf2, src + n - 8, 8); + if (nullify_tail) + { + memset(dst + 7, 0, 8); + } + memcpy(dst, &buf1, 8); + memcpy(dst + n - 8, &buf2, 8); + } + else if (n >= 4) + { + uint32_t buf1; + uint32_t buf2; + memcpy(&buf1, src, 4); + memcpy(&buf2, src + n - 4, 4); + if (nullify_tail) + { + memset(dst + 4, 0, 4); + memset(dst + 7, 0, 8); + } + memcpy(dst, &buf1, 4); + memcpy(dst + n - 4, &buf2, 4); + } + else + { + if (n != 0) + { + dst[0] = src[0]; + dst[n / 2] = src[n / 2]; + dst[n - 1] = src[n - 1]; + } + if (nullify_tail) + { + memset(dst + 7, 0, 8); + memset(dst + n, 0, 8); + } + } + } + + // Compact class for tracking the reference count and state flags for CordRep + // instances. Data is stored in an atomic int32_t for compactness and speed. + class RefcountAndFlags + { + public: + constexpr RefcountAndFlags() : + count_{kRefIncrement} + { + } + struct Immortal + { + }; + explicit constexpr RefcountAndFlags(Immortal) : + count_(kImmortalFlag) + { + } + + // Increments the reference count. Imposes no memory ordering. + inline void Increment() + { + count_.fetch_add(kRefIncrement, std::memory_order_relaxed); + } + + // Asserts that the current refcount is greater than 0. If the refcount is + // greater than 1, decrements the reference count. + // + // Returns false if there are no references outstanding; true otherwise. + // Inserts barriers to ensure that state written before this method returns + // false will be visible to a thread that just observed this method returning + // false. Always returns false when the immortal bit is set. 
+ inline bool Decrement() + { + int32_t refcount = count_.load(std::memory_order_acquire); + assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag); + return refcount != kRefIncrement && + (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) & + kHighRefcountMask) != 0; + } + + // Same as Decrement but expect that refcount is greater than 1. + inline bool DecrementExpectHighRefcount() + { + int32_t refcount = + count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel); + assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag); + return (refcount & kHighRefcountMask) != 0; + } + + // Returns the current reference count using acquire semantics. + inline size_t Get() const + { + return static_cast(count_.load(std::memory_order_acquire) >> kNumFlags); + } + + // Returns whether the atomic integer is 1. + // If the reference count is used in the conventional way, a + // reference count of 1 implies that the current thread owns the + // reference and no other thread shares it. + // This call performs the test for a reference count of one, and + // performs the memory barrier needed for the owning thread + // to act on the object, knowing that it has exclusive access to the + // object. Always returns false when the immortal bit is set. + inline bool IsOne() + { + return (count_.load(std::memory_order_acquire) & kRefcountMask) == + kRefIncrement; + } + + bool IsImmortal() const + { + return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0; + } + + private: + // We reserve the bottom bits for flags. + // kImmortalBit indicates that this entity should never be collected; it is + // used for the StringConstant constructor to avoid collecting immutable + // constant cords. + // kReservedFlag is reserved for future use. + enum Flags + { + kNumFlags = 2, + + kImmortalFlag = 0x1, + kReservedFlag = 0x2, + kRefIncrement = (1 << kNumFlags), + + // Bitmask to use when checking refcount by equality. 
This masks out + // all flags except kImmortalFlag, which is part of the refcount for + // purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1 + // if the immortal bit is set.) + kRefcountMask = ~kReservedFlag, + + // Bitmask to use when checking if refcount is equal to 1 and not + // immortal when decrementing the refcount. This masks out kRefIncrement and + // all flags except kImmortalFlag. If the masked RefcountAndFlags is 0, we + // assume the refcount is equal to 1, since we know it's not immortal and + // not greater than 1. If the masked RefcountAndFlags is not 0, we can + // assume the refcount is not equal to 1 since either a higher bit in the + // refcount is set, or kImmortal is set. + kHighRefcountMask = kRefcountMask & ~kRefIncrement, + }; + + std::atomic count_; + }; + + // Various representations that we allow + enum CordRepKind + { + UNUSED_0 = 0, + SUBSTRING = 1, + CRC = 2, + BTREE = 3, + RING = 4, + EXTERNAL = 5, + + // We have different tags for different sized flat arrays, + // starting with FLAT, and limited to MAX_FLAT_TAG. The below values map to an + // allocated range of 32 bytes to 256 KB. The current granularity is: + // - 8 byte granularity for flat sizes in [32 - 512] + // - 64 byte granularity for flat sizes in (512 - 8KiB] + // - 4KiB byte granularity for flat sizes in (8KiB, 256 KiB] + // If a new tag is needed in the future, then 'FLAT' and 'MAX_FLAT_TAG' should + // be adjusted as well as the Tag <---> Size mapping logic so that FLAT still + // represents the minimum flat allocation size. (32 bytes as of now). + FLAT = 6, + MAX_FLAT_TAG = 248 + }; + + // There are various locations where we want to check if some rep is a 'plain' + // data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we + // can perform this check in a single branch as 'tag >= EXTERNAL' + // Likewise, we have some locations where we check for 'ring or external/flat', + // so likewise align RING to EXTERNAL. 
+ // Note that we can leave this optimization to the compiler. The compiler will + // DTRT when it sees a condition like `tag == EXTERNAL || tag >= FLAT`. + static_assert(RING == BTREE + 1, "BTREE and RING not consecutive"); + static_assert(EXTERNAL == RING + 1, "BTREE and EXTERNAL not consecutive"); + static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive"); + + struct CordRep + { + // Result from an `extract edge` operation. Contains the (possibly changed) + // tree node as well as the extracted edge, or {tree, nullptr} if no edge + // could be extracted. + // On success, the returned `tree` value is null if `extracted` was the only + // data edge inside the tree, a data edge if there were only two data edges in + // the tree, or the (possibly new / smaller) remaining tree with the extracted + // data edge removed. + struct ExtractResult + { + CordRep* tree; + CordRep* extracted; + }; + + CordRep() = default; + constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l) : + length(l), + refcount(immortal), + tag(EXTERNAL), + storage{} + { + } + + // The following three fields have to be less than 32 bytes since + // that is the smallest supported flat node size. Some code optimizations rely + // on the specific layout of these fields. Notably: the non-trivial field + // `refcount` being preceded by `length`, and being tailed by POD data + // members only. + // # LINT.IfChange + size_t length; + RefcountAndFlags refcount; + // If tag < FLAT, it represents CordRepKind and indicates the type of node. + // Otherwise, the node type is CordRepFlat and the tag is the encoded size. + uint8_t tag; + + // `storage` provides two main purposes: + // - the starting point for FlatCordRep.Data() [flexible-array-member] + // - 3 bytes of additional storage for use by derived classes. + // The latter is used by CordrepConcat and CordRepBtree. 
CordRepConcat stores + // a 'depth' value in storage[0], and the (future) CordRepBtree class stores + // `height`, `begin` and `end` in the 3 entries. Otherwise we would need to + // allocate room for these in the derived class, as not all compilers reuse + // padding space from the base class (clang and gcc do, MSVC does not, etc) + uint8_t storage[3]; + // # LINT.ThenChange(cord_rep_btree.h:copy_raw) + + // Returns true if this instance's tag matches the requested type. + constexpr bool IsRing() const + { + return tag == RING; + } + constexpr bool IsSubstring() const + { + return tag == SUBSTRING; + } + constexpr bool IsCrc() const + { + return tag == CRC; + } + constexpr bool IsExternal() const + { + return tag == EXTERNAL; + } + constexpr bool IsFlat() const + { + return tag >= FLAT; + } + constexpr bool IsBtree() const + { + return tag == BTREE; + } + + inline CordRepRing* ring(); + inline const CordRepRing* ring() const; + inline CordRepSubstring* substring(); + inline const CordRepSubstring* substring() const; + inline CordRepCrc* crc(); + inline const CordRepCrc* crc() const; + inline CordRepExternal* external(); + inline const CordRepExternal* external() const; + inline CordRepFlat* flat(); + inline const CordRepFlat* flat() const; + inline CordRepBtree* btree(); + inline const CordRepBtree* btree() const; + + // -------------------------------------------------------------------- + // Memory management + + // Destroys the provided `rep`. + static void Destroy(CordRep* rep); + + // Increments the reference count of `rep`. + // Requires `rep` to be a non-null pointer value. + static inline CordRep* Ref(CordRep* rep); + + // Decrements the reference count of `rep`. Destroys rep if count reaches + // zero. Requires `rep` to be a non-null pointer value. 
+ static inline void Unref(CordRep* rep); + }; + + struct CordRepSubstring : public CordRep + { + size_t start; // Starting offset of substring in child + CordRep* child; + + // Creates a substring on `child`, adopting a reference on `child`. + // Requires `child` to be either a flat or external node, and `pos` and `n` to + // form a non-empty partial sub range of `'child`, i.e.: + // `n > 0 && n < length && n + pos <= length` + static inline CordRepSubstring* Create(CordRep* child, size_t pos, size_t n); + + // Creates a substring of `rep`. Does not adopt a reference on `rep`. + // Requires `IsDataEdge(rep) && n > 0 && pos + n <= rep->length`. + // If `n == rep->length` then this method returns `CordRep::Ref(rep)` + // If `rep` is a substring of a flat or external node, then this method will + // return a new substring of that flat or external node with `pos` adjusted + // with the original `start` position. + static inline CordRep* Substring(CordRep* rep, size_t pos, size_t n); + }; + + // Type for function pointer that will invoke the releaser function and also + // delete the `CordRepExternalImpl` corresponding to the passed in + // `CordRepExternal`. + using ExternalReleaserInvoker = void (*)(CordRepExternal*); + + // External CordReps are allocated together with a type erased releaser. The + // releaser is stored in the memory directly following the CordRepExternal. + struct CordRepExternal : public CordRep + { + CordRepExternal() = default; + explicit constexpr CordRepExternal(absl::string_view str) : + CordRep(RefcountAndFlags::Immortal{}, str.size()), + base(str.data()), + releaser_invoker(nullptr) + { + } + + const char* base; + // Pointer to function that knows how to call and destroy the releaser. + ExternalReleaserInvoker releaser_invoker; + + // Deletes (releases) the external rep. 
+ // Requires rep != nullptr and rep->IsExternal() + static void Delete(CordRep* rep); + }; + + struct Rank1 + { + }; + struct Rank0 : Rank1 + { + }; + + template> + void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) + { + ::absl::base_internal::invoke(std::forward(releaser), data); + } + + template> + void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) + { + ::absl::base_internal::invoke(std::forward(releaser)); + } + + // We use CompressedTuple so that we can benefit from EBCO. + template + struct CordRepExternalImpl : public CordRepExternal, public ::absl::container_internal::CompressedTuple + { + // The extra int arg is so that we can avoid interfering with copy/move + // constructors while still benefitting from perfect forwarding. + template + CordRepExternalImpl(T&& releaser, int) : + CordRepExternalImpl::CompressedTuple(std::forward(releaser)) + { + this->releaser_invoker = &Release; + } + + ~CordRepExternalImpl() + { + InvokeReleaser(Rank0{}, std::move(this->template get<0>()), absl::string_view(base, length)); + } + + static void Release(CordRepExternal* rep) + { + delete static_cast(rep); + } + }; + + inline CordRepSubstring* CordRepSubstring::Create(CordRep* child, size_t pos, size_t n) + { + assert(child != nullptr); + assert(n > 0); + assert(n < child->length); + assert(pos < child->length); + assert(n <= child->length - pos); + + // TODO(b/217376272): Harden internal logic. + // Move to strategical places inside the Cord logic and make this an assert. 
+ if (ABSL_PREDICT_FALSE(!(child->IsExternal() || child->IsFlat()))) + { + LogFatalNodeType(child); + } + + CordRepSubstring* rep = new CordRepSubstring(); + rep->length = n; + rep->tag = SUBSTRING; + rep->start = pos; + rep->child = child; + return rep; + } + + inline CordRep* CordRepSubstring::Substring(CordRep* rep, size_t pos, size_t n) + { + assert(rep != nullptr); + assert(n != 0); + assert(pos < rep->length); + assert(n <= rep->length - pos); + if (n == rep->length) + return CordRep::Ref(rep); + if (rep->IsSubstring()) + { + pos += rep->substring()->start; + rep = rep->substring()->child; + } + CordRepSubstring* substr = new CordRepSubstring(); + substr->length = n; + substr->tag = SUBSTRING; + substr->start = pos; + substr->child = CordRep::Ref(rep); + return substr; + } + + inline void CordRepExternal::Delete(CordRep* rep) + { + assert(rep != nullptr && rep->IsExternal()); + auto* rep_external = static_cast(rep); + assert(rep_external->releaser_invoker != nullptr); + rep_external->releaser_invoker(rep_external); + } + + template + struct ConstInitExternalStorage + { + ABSL_CONST_INIT static CordRepExternal value; + }; + + template + ABSL_CONST_INIT CordRepExternal + ConstInitExternalStorage::value(Str::value); + + enum + { + kMaxInline = 15, + }; + + constexpr char GetOrNull(absl::string_view data, size_t pos) + { + return pos < data.size() ? data[pos] : '\0'; + } + + // We store cordz_info as 64 bit pointer value in little endian format. This + // guarantees that the least significant byte of cordz_info matches the first + // byte of the inline data representation in `data`, which holds the inlined + // size or the 'is_tree' bit. + using cordz_info_t = int64_t; + + // Assert that the `cordz_info` pointer value perfectly overlaps the last half + // of `data` and can hold a pointer value. 
+ static_assert(sizeof(cordz_info_t) * 2 == kMaxInline + 1, ""); + static_assert(sizeof(cordz_info_t) >= sizeof(intptr_t), ""); + + // LittleEndianByte() creates a little endian representation of 'value', i.e.: + // a little endian value where the first byte in the host's representation + // holds 'value`, with all other bytes being 0. + static constexpr cordz_info_t LittleEndianByte(unsigned char value) + { +#if defined(ABSL_IS_BIG_ENDIAN) + return static_cast(value) << ((sizeof(cordz_info_t) - 1) * 8); +#else + return value; +#endif + } + + class InlineData + { + public: + // DefaultInitType forces the use of the default initialization constructor. + enum DefaultInitType + { + kDefaultInit + }; + + // kNullCordzInfo holds the little endian representation of intptr_t(1) + // This is the 'null' / initial value of 'cordz_info'. The null value + // is specifically big endian 1 as with 64-bit pointers, the last + // byte of cordz_info overlaps with the last byte holding the tag. + static constexpr cordz_info_t kNullCordzInfo = LittleEndianByte(1); + + // kTagOffset contains the offset of the control byte / tag. This constant is + // intended mostly for debugging purposes: do not remove this constant as it + // is actively inspected and used by gdb pretty printing code. + static constexpr size_t kTagOffset = 0; + + // Implement `~InlineData()` conditionally: we only need this destructor to + // unpoison poisoned instances under *SAN, and it will only compile correctly + // if the current compiler supports `absl::is_constant_evaluated()`. +#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER + ~InlineData() noexcept + { + unpoison(); + } +#endif + + constexpr InlineData() noexcept + { + poison_this(); + } + + explicit InlineData(DefaultInitType) noexcept : + rep_(kDefaultInit) + { + poison_this(); + } + + explicit InlineData(CordRep* rep) noexcept : + rep_(rep) + { + ABSL_ASSERT(rep != nullptr); + } + + // Explicit constexpr constructor to create a constexpr InlineData + // value. 
Creates an inlined SSO value if `rep` is null, otherwise + // creates a tree instance value. + constexpr InlineData(absl::string_view sv, CordRep* rep) noexcept + : + rep_(rep ? Rep(rep) : Rep(sv)) + { + poison(); + } + + constexpr InlineData(const InlineData& rhs) noexcept; + InlineData& operator=(const InlineData& rhs) noexcept; + + friend bool operator==(const InlineData& lhs, const InlineData& rhs) + { +#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER + const Rep l = lhs.rep_.SanitizerSafeCopy(); + const Rep r = rhs.rep_.SanitizerSafeCopy(); + return memcmp(&l, &r, sizeof(l)) == 0; +#else + return memcmp(&lhs, &rhs, sizeof(lhs)) == 0; +#endif + } + friend bool operator!=(const InlineData& lhs, const InlineData& rhs) + { + return !operator==(lhs, rhs); + } + + // Poisons the unused inlined SSO data if the current instance + // is inlined, else un-poisons the entire instance. + constexpr void poison(); + + // Un-poisons this instance. + constexpr void unpoison(); + + // Poisons the current instance. This is used on default initialization. + constexpr void poison_this(); + + // Returns true if the current instance is empty. + // The 'empty value' is an inlined data value of zero length. + bool is_empty() const + { + return rep_.tag() == 0; + } + + // Returns true if the current instance holds a tree value. + bool is_tree() const + { + return (rep_.tag() & 1) != 0; + } + + // Returns true if the current instance holds a cordz_info value. + // Requires the current instance to hold a tree value. + bool is_profiled() const + { + assert(is_tree()); + return rep_.cordz_info() != kNullCordzInfo; + } + + // Returns true if either of the provided instances hold a cordz_info value. + // This method is more efficient than the equivalent `data1.is_profiled() || + // data2.is_profiled()`. Requires both arguments to hold a tree. 
+ static bool is_either_profiled(const InlineData& data1, const InlineData& data2) + { + assert(data1.is_tree() && data2.is_tree()); + return (data1.rep_.cordz_info() | data2.rep_.cordz_info()) != + kNullCordzInfo; + } + + // Returns the cordz_info sampling instance for this instance, or nullptr + // if the current instance is not sampled and does not have CordzInfo data. + // Requires the current instance to hold a tree value. + CordzInfo* cordz_info() const + { + assert(is_tree()); + intptr_t info = static_cast(absl::little_endian::ToHost64( + static_cast(rep_.cordz_info()) + )); + assert(info & 1); + return reinterpret_cast(info - 1); + } + + // Sets the current cordz_info sampling instance for this instance, or nullptr + // if the current instance is not sampled and does not have CordzInfo data. + // Requires the current instance to hold a tree value. + void set_cordz_info(CordzInfo* cordz_info) + { + assert(is_tree()); + uintptr_t info = reinterpret_cast(cordz_info) | 1; + rep_.set_cordz_info( + static_cast(absl::little_endian::FromHost64(info)) + ); + } + + // Resets the current cordz_info to null / empty. + void clear_cordz_info() + { + assert(is_tree()); + rep_.set_cordz_info(kNullCordzInfo); + } + + // Returns a read only pointer to the character data inside this instance. + // Requires the current instance to hold inline data. + const char* as_chars() const + { + assert(!is_tree()); + return rep_.as_chars(); + } + + // Returns a mutable pointer to the character data inside this instance. + // Should be used for 'write only' operations setting an inlined value. 
+ // Applications can set the value of inlined data either before or after + // setting the inlined size, i.e., both of the below are valid: + // + // // Set inlined data and inline size + // memcpy(data_.as_chars(), data, size); + // data_.set_inline_size(size); + // + // // Set inlined size and inline data + // data_.set_inline_size(size); + // memcpy(data_.as_chars(), data, size); + // + // It's an error to read from the returned pointer without a preceding write + // if the current instance does not hold inline data, i.e.: is_tree() == true. + char* as_chars() + { + return rep_.as_chars(); + } + + // Returns the tree value of this value. + // Requires the current instance to hold a tree value. + CordRep* as_tree() const + { + assert(is_tree()); + return rep_.tree(); + } + + void set_inline_data(const char* data, size_t n) + { + ABSL_ASSERT(n <= kMaxInline); + unpoison(); + rep_.set_tag(static_cast(n << 1)); + SmallMemmove(rep_.as_chars(), data, n); + poison(); + } + + void copy_max_inline_to(char* dst) const + { + assert(!is_tree()); + memcpy(dst, rep_.SanitizerSafeCopy().as_chars(), kMaxInline); + } + + // Initialize this instance to holding the tree value `rep`, + // initializing the cordz_info to null, i.e.: 'not profiled'. + void make_tree(CordRep* rep) + { + unpoison(); + rep_.make_tree(rep); + } + + // Set the tree value of this instance to 'rep`. + // Requires the current instance to already hold a tree value. + // Does not affect the value of cordz_info. + void set_tree(CordRep* rep) + { + assert(is_tree()); + rep_.set_tree(rep); + } + + // Returns the size of the inlined character data inside this instance. + // Requires the current instance to hold inline data. + size_t inline_size() const + { + return rep_.inline_size(); + } + + // Sets the size of the inlined character data inside this instance. + // Requires `size` to be <= kMaxInline. + // See the documentation on 'as_chars()' for more information and examples. 
+ void set_inline_size(size_t size) + { + unpoison(); + rep_.set_inline_size(size); + poison(); + } + + // Compares 'this' inlined data with rhs. The comparison is a straightforward + // lexicographic comparison. `Compare()` returns values as follows: + // + // -1 'this' InlineData instance is smaller + // 0 the InlineData instances are equal + // 1 'this' InlineData instance larger + int Compare(const InlineData& rhs) const + { + return Compare(rep_.SanitizerSafeCopy(), rhs.rep_.SanitizerSafeCopy()); + } + + private: + struct Rep + { + // See cordz_info_t for forced alignment and size of `cordz_info` details. + struct AsTree + { + explicit constexpr AsTree(absl::cord_internal::CordRep* tree) : + rep(tree) + { + } + cordz_info_t cordz_info = kNullCordzInfo; + absl::cord_internal::CordRep* rep; + }; + + explicit Rep(DefaultInitType) + { + } + constexpr Rep() : + data{0} + { + } + constexpr Rep(const Rep&) = default; + constexpr Rep& operator=(const Rep&) = default; + + explicit constexpr Rep(CordRep* rep) : + as_tree(rep) + { + } + + explicit constexpr Rep(absl::string_view chars) : + data{static_cast((chars.size() << 1)), GetOrNull(chars, 0), GetOrNull(chars, 1), GetOrNull(chars, 2), GetOrNull(chars, 3), GetOrNull(chars, 4), GetOrNull(chars, 5), GetOrNull(chars, 6), GetOrNull(chars, 7), GetOrNull(chars, 8), GetOrNull(chars, 9), GetOrNull(chars, 10), GetOrNull(chars, 11), GetOrNull(chars, 12), GetOrNull(chars, 13), GetOrNull(chars, 14)} + { + } + + // Disable sanitizer as we must always be able to read `tag`. 
+ ABSL_CORD_INTERNAL_NO_SANITIZE + int8_t tag() const + { + return reinterpret_cast(this)[0]; + } + void set_tag(int8_t rhs) + { + reinterpret_cast(this)[0] = rhs; + } + + char* as_chars() + { + return data + 1; + } + const char* as_chars() const + { + return data + 1; + } + + bool is_tree() const + { + return (tag() & 1) != 0; + } + + size_t inline_size() const + { + ABSL_ASSERT(!is_tree()); + return static_cast(tag()) >> 1; + } + + void set_inline_size(size_t size) + { + ABSL_ASSERT(size <= kMaxInline); + set_tag(static_cast(size << 1)); + } + + CordRep* tree() const + { + return as_tree.rep; + } + void set_tree(CordRep* rhs) + { + as_tree.rep = rhs; + } + + cordz_info_t cordz_info() const + { + return as_tree.cordz_info; + } + void set_cordz_info(cordz_info_t rhs) + { + as_tree.cordz_info = rhs; + } + + void make_tree(CordRep* tree) + { + as_tree.rep = tree; + as_tree.cordz_info = kNullCordzInfo; + } + +#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER + constexpr Rep SanitizerSafeCopy() const + { + if (!absl::is_constant_evaluated()) + { + Rep res; + if (is_tree()) + { + res = *this; + } + else + { + res.set_tag(tag()); + memcpy(res.as_chars(), as_chars(), inline_size()); + } + return res; + } + else + { + return *this; + } + } +#else + constexpr const Rep& SanitizerSafeCopy() const + { + return *this; + } +#endif + + // If the data has length <= kMaxInline, we store it in `data`, and + // store the size in the first char of `data` shifted left + 1. + // Else we store it in a tree and store a pointer to that tree in + // `as_tree.rep` with a tagged pointer to make `tag() & 1` non zero. 
+ union + { + char data[kMaxInline + 1]; + AsTree as_tree; + }; + }; + + // Private implementation of `Compare()` + static inline int Compare(const Rep& lhs, const Rep& rhs) + { + uint64_t x, y; + memcpy(&x, lhs.as_chars(), sizeof(x)); + memcpy(&y, rhs.as_chars(), sizeof(y)); + if (x == y) + { + memcpy(&x, lhs.as_chars() + 7, sizeof(x)); + memcpy(&y, rhs.as_chars() + 7, sizeof(y)); + if (x == y) + { + if (lhs.inline_size() == rhs.inline_size()) + return 0; + return lhs.inline_size() < rhs.inline_size() ? -1 : 1; + } + } + x = absl::big_endian::FromHost64(x); + y = absl::big_endian::FromHost64(y); + return x < y ? -1 : 1; + } + + Rep rep_; + }; + + static_assert(sizeof(InlineData) == kMaxInline + 1, ""); + +#ifdef ABSL_INTERNAL_CORD_HAVE_SANITIZER + + constexpr InlineData::InlineData(const InlineData& rhs) noexcept + : + rep_(rhs.rep_.SanitizerSafeCopy()) + { + poison(); + } + + inline InlineData& InlineData::operator=(const InlineData& rhs) noexcept + { + unpoison(); + rep_ = rhs.rep_.SanitizerSafeCopy(); + poison(); + return *this; + } + + constexpr void InlineData::poison_this() + { + if (!absl::is_constant_evaluated()) + { + container_internal::SanitizerPoisonObject(this); + } + } + + constexpr void InlineData::unpoison() + { + if (!absl::is_constant_evaluated()) + { + container_internal::SanitizerUnpoisonObject(this); + } + } + + constexpr void InlineData::poison() + { + if (!absl::is_constant_evaluated()) + { + if (is_tree()) + { + container_internal::SanitizerUnpoisonObject(this); + } + else if (const size_t size = inline_size()) + { + if (size < kMaxInline) + { + const char* end = rep_.as_chars() + size; + container_internal::SanitizerPoisonMemoryRegion(end, kMaxInline - size); + } + } + else + { + container_internal::SanitizerPoisonObject(this); + } + } + } + +#else // ABSL_INTERNAL_CORD_HAVE_SANITIZER + + constexpr InlineData::InlineData(const InlineData&) noexcept = default; + inline InlineData& InlineData::operator=(const InlineData&) noexcept = default; 
+
+    constexpr void InlineData::poison_this()
+    {
+    }
+    constexpr void InlineData::unpoison()
+    {
+    }
+    constexpr void InlineData::poison()
+    {
+    }
+
+#endif // ABSL_INTERNAL_CORD_HAVE_SANITIZER
+
+    inline CordRepSubstring* CordRep::substring()
+    {
+        assert(IsSubstring());
+        return static_cast<CordRepSubstring*>(this);
+    }
+
+    inline const CordRepSubstring* CordRep::substring() const
+    {
+        assert(IsSubstring());
+        return static_cast<const CordRepSubstring*>(this);
+    }
+
+    inline CordRepExternal* CordRep::external()
+    {
+        assert(IsExternal());
+        return static_cast<CordRepExternal*>(this);
+    }
+
+    inline const CordRepExternal* CordRep::external() const
+    {
+        assert(IsExternal());
+        return static_cast<const CordRepExternal*>(this);
+    }
+
+    inline CordRep* CordRep::Ref(CordRep* rep)
+    {
+        // ABSL_ASSUME is a workaround for
+        // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105585
+        ABSL_ASSUME(rep != nullptr);
+        rep->refcount.Increment();
+        return rep;
+    }
+
+    inline void CordRep::Unref(CordRep* rep)
+    {
+        assert(rep != nullptr);
+        // Expect refcount to be 0. Avoiding the cost of an atomic decrement should
+        // typically outweigh the cost of an extra branch checking for ref == 1.
+        if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount()))
+        {
+            Destroy(rep);
+        }
+    }
+
+    } // namespace cord_internal
+
+    ABSL_NAMESPACE_END
+} // namespace absl
+#endif // ABSL_STRINGS_INTERNAL_CORD_INTERNAL_H_
diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree.h
new file mode 100644
index 00000000..7ecac236
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree.h
@@ -0,0 +1,1041 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_
+
+#include <cassert>
+#include <cstdint>
+#include <iosfwd>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/span.h"
+
+namespace absl
+{
+    ABSL_NAMESPACE_BEGIN
+    namespace cord_internal
+    {
+
+        // `SetCordBtreeExhaustiveValidation()` can be set to force exhaustive
+        // validation in debug assertions, and code that calls `IsValid()`
+        // explicitly. By default, assertions should be relatively cheap and
+        // AssertValid() can easily lead to O(n^2) complexity as recursive / full tree
+        // validation is O(n).
+        void SetCordBtreeExhaustiveValidation(bool do_exaustive_validation);
+        bool IsCordBtreeExhaustiveValidationEnabled();
+
+        class CordRepBtreeNavigator;
+
+        // CordRepBtree is as the name implies a btree implementation of a Cordrep tree.
+        // Data is stored at the leaf level only, non leaf nodes contain down pointers
+        // only. Allowed types of data edges are FLAT, EXTERNAL and SUBSTRINGs of FLAT
+        // or EXTERNAL nodes. The implementation allows for data to be added to either
+        // end of the tree only, it does not provide any 'insert' logic.
This has the + // benefit that we can expect good fill ratios: all nodes except the outer + // 'legs' will have 100% fill ratios for trees built using Append/Prepend + // methods. Merged trees will typically have a fill ratio well above 50% as in a + // similar fashion, one side of the merged tree will typically have a 100% fill + // ratio, and the 'open' end will average 50%. All operations are O(log(n)) or + // better, and the tree never needs balancing. + // + // All methods accepting a CordRep* or CordRepBtree* adopt a reference on that + // input unless explicitly stated otherwise. All functions returning a CordRep* + // or CordRepBtree* instance transfer a reference back to the caller. + // Simplified, callers both 'donate' and 'consume' a reference count on each + // call, simplifying the API. An example of building a tree: + // + // CordRepBtree* tree = CordRepBtree::Create(MakeFlat("Hello")); + // tree = CordRepBtree::Append(tree, MakeFlat("world")); + // + // In the above example, all inputs are consumed, making each call affecting + // `tree` reference count neutral. The returned `tree` value can be different + // from the input if the input is shared with other threads, or if the tree + // grows in height, but callers typically never have to concern themselves with + // that and trust that all methods DTRT at all times. + class CordRepBtree : public CordRep + { + public: + // EdgeType identifies `front` and `back` enum values. + // Various implementations in CordRepBtree such as `Add` and `Edge` are + // generic and templated on operating on either of the boundary edges. + // For more information on the possible edges contained in a CordRepBtree + // instance see the documentation for `edges_`. 
+ enum class EdgeType + { + kFront, + kBack + }; + + // Convenience constants into `EdgeType` + static constexpr EdgeType kFront = EdgeType::kFront; + static constexpr EdgeType kBack = EdgeType::kBack; + + // Maximum number of edges: based on experiments and performance data, we can + // pick suitable values resulting in optimum cacheline aligned values. The + // preferred values are based on 64-bit systems where we aim to align this + // class onto 64 bytes, i.e.: 6 = 64 bytes, 14 = 128 bytes, etc. + // TODO(b/192061034): experiment with alternative sizes. + static constexpr size_t kMaxCapacity = 6; + + // Reasonable maximum height of the btree. We can expect a fill ratio of at + // least 50%: trees are always expanded at the front or back. Concatenating + // trees will then typically fold at the top most node, where the lower nodes + // are at least at capacity on one side of joined inputs. At a lower fill + // rate of 4 edges per node, we have capacity for ~16 million leaf nodes. + // We will fail / abort if an application ever exceeds this height, which + // should be extremely rare (near impossible) and be an indication of an + // application error: we do not assume it reasonable for any application to + // operate correctly with such monster trees. + // Another compelling reason for the number `12` is that any contextual stack + // required for navigation or insertion requires 12 words and 12 bytes, which + // fits inside 2 cache lines with some room to spare, and is reasonable as a + // local stack variable compared to Cord's current near 400 bytes stack use. + // The maximum `height` value of a node is then `kMaxDepth - 1` as node height + // values start with a value of 0 for leaf nodes. + static constexpr size_t kMaxDepth = 12; + // See comments on height() for why this is an int and not a size_t. 
+            static constexpr int kMaxHeight = static_cast<int>(kMaxDepth - 1);
+
+            // `Action` defines the action for unwinding changes done at the btree's leaf
+            // level that need to be propagated up to the parent node(s). Each operation
+            // on a node has an effect / action defined as follows:
+            // - kSelf
+            // The operation (add / update, etc) was performed directly on the node as
+            // the node is private to the current thread (i.e.: not shared directly or
+            // indirectly through a refcount > 1). Changes can be propagated directly to
+            // all parent nodes as all parent nodes are also then private to the current
+            // thread.
+            // - kCopied
+            // The operation (add / update, etc) was performed on a copy of the original
+            // node, as the node is (potentially) directly or indirectly shared with
+            // other threads. Changes need to be propagated into the parent nodes where
+            // the old down pointer must be unreffed and replaced with this new copy.
+            // Such changes to parent nodes may themselves require a copy if the parent
+            // node is also shared. A kCopied action can propagate all the way to the
+            // top node where we then must unref the `tree` input provided by the
+            // caller, and return the new copy.
+            // - kPopped
+            // The operation (typically add) could not be satisfied due to insufficient
+            // capacity in the targeted node, and a new 'leg' was created that needs to
+            // be added into the parent node. For example, adding a FLAT inside a leaf
+            // node that is at capacity will create a new leaf node containing that
+            // FLAT, that needs to be 'popped' up the btree. Such 'pop' actions can
+            // cascade up the tree if parent nodes are also at capacity. A 'Popped'
+            // action propagating all the way to the top of the tree will result in
+            // the tree becoming one level higher than the current tree through a final
+            // `CordRepBtree::New(tree, popped)` call, resulting in a new top node
+            // referencing the old tree and the new (fully popped upwards) 'leg'.
+            enum Action
+            {
+                kSelf,
+                kCopied,
+                kPopped
+            };
+
+            // Result of an operation on a node. See the `Action` enum for details.
+            struct OpResult
+            {
+                CordRepBtree* tree;
+                Action action;
+            };
+
+            // Return value of the CopyPrefix and CopySuffix methods which can
+            // return a node or data edge at any height inside the tree.
+            // A height of 0 defines the lowest (leaf) node, a height of -1 identifies
+            // `edge` as being a plain data node: EXTERNAL / FLAT or SUBSTRING thereof.
+            struct CopyResult
+            {
+                CordRep* edge;
+                int height;
+            };
+
+            // Logical position inside a node:
+            // - index: index of the edge.
+            // - n: size or offset value depending on context.
+            struct Position
+            {
+                size_t index;
+                size_t n;
+            };
+
+            // Creates a btree from the given input. Adopts a ref of `rep`.
+            // If the input `rep` is itself a btree, i.e., `IsBtree()`, then this
+            // function immediately returns `rep->btree()`. If the input is a valid data
+            // edge (see IsDataEdge()), then a new leaf node is returned containing `rep`
+            // as the sole data edge. Else, the input is assumed to be a (legacy) concat
+            // tree, and the input is consumed and transformed into a btree().
+            static CordRepBtree* Create(CordRep* rep);
+
+            // Destroys the provided tree. Should only be called by cord internal API's,
+            // typically after a ref_count.Decrement() on the last reference count.
+            static void Destroy(CordRepBtree* tree);
+
+            // Destruction
+            static void Delete(CordRepBtree* tree)
+            {
+                delete tree;
+            }
+
+            // Use CordRep::Unref() as we overload for absl::Span.
+            using CordRep::Unref;
+
+            // Unrefs all edges in `edges` which are assumed to be 'likely one'.
+            static void Unref(absl::Span<CordRep* const> edges);
+
+            // Appends / Prepends an existing CordRep instance to this tree.
+            // The below methods accept three types of input:
+            // 1) `rep` is a data node (See `IsDataNode` for valid data edges).
+            // `rep` is appended or prepended to this tree 'as is'.
+            // 2) `rep` is a BTREE.
+ // `rep` is merged into `tree` respecting the Append/Prepend order. + // 3) `rep` is some other (legacy) type. + // `rep` is converted in place and added to `tree` + // Requires `tree` and `rep` to be not null. + static CordRepBtree* Append(CordRepBtree* tree, CordRep* rep); + static CordRepBtree* Prepend(CordRepBtree* tree, CordRep* rep); + + // Append/Prepend the data in `data` to this tree. + // The `extra` parameter defines how much extra capacity should be allocated + // for any additional FLAT being allocated. This is an optimization hint from + // the caller. For example, a caller may need to add 2 string_views of data + // "abc" and "defghi" which are not consecutive. The caller can in this case + // invoke `AddData(tree, "abc", 6)`, and any newly added flat is allocated + // where possible with at least 6 bytes of extra capacity beyond `length`. + // This helps avoiding data getting fragmented over multiple flats. + // There is no limit on the size of `data`. If `data` can not be stored inside + // a single flat, then the function will iteratively add flats until all data + // has been consumed and appended or prepended to the tree. + static CordRepBtree* Append(CordRepBtree* tree, string_view data, size_t extra = 0); + static CordRepBtree* Prepend(CordRepBtree* tree, string_view data, size_t extra = 0); + + // Returns a new tree, containing `n` bytes of data from this instance + // starting at offset `offset`. Where possible, the returned tree shares + // (re-uses) data edges and nodes with this instance to minimize the + // combined memory footprint of both trees. + // Requires `offset + n <= length`. Returns `nullptr` if `n` is zero. + CordRep* SubTree(size_t offset, size_t n); + + // Removes `n` trailing bytes from `tree`, and returns the resulting tree + // or data edge. Returns `tree` if n is zero, and nullptr if n == length. 
+ // This function is logically identical to: + // result = tree->SubTree(0, tree->length - n); + // Unref(tree); + // return result; + // However, the actual implementation will as much as possible perform 'in + // place' modifications on the tree on all nodes and edges that are mutable. + // For example, in a fully privately owned tree with the last edge being a + // flat of length 12, RemoveSuffix(1) will simply set the length of that data + // edge to 11, and reduce the length of all nodes on the edge path by 1. + static CordRep* RemoveSuffix(CordRepBtree* tree, size_t n); + + // Returns the character at the given offset. + char GetCharacter(size_t offset) const; + + // Returns true if this node holds a single data edge, and if so, sets + // `fragment` to reference the contained data. `fragment` is an optional + // output parameter and allowed to be null. + bool IsFlat(absl::string_view* fragment) const; + + // Returns true if the data of `n` bytes starting at offset `offset` + // is contained in a single data edge, and if so, sets fragment to reference + // the contained data. `fragment` is an optional output parameter and allowed + // to be null. + bool IsFlat(size_t offset, size_t n, absl::string_view* fragment) const; + + // Returns a span (mutable range of bytes) of up to `size` bytes into the + // last FLAT data edge inside this tree under the following conditions: + // - none of the nodes down into the FLAT node are shared. + // - the last data edge in this tree is a non-shared FLAT. + // - the referenced FLAT has additional capacity available. + // If all these conditions are met, a non-empty span is returned, and the + // length of the flat node and involved tree nodes have been increased by + // `span.length()`. The caller is responsible for immediately assigning values + // to all uninitialized data reference by the returned span. 
+            // Requires `this->refcount.IsOne()`: this function forces the caller to do
+            // this fast path check on the top level node, as this is the most commonly
+            // shared node of a cord tree.
+            Span<char> GetAppendBuffer(size_t size);
+
+            // Extracts the right-most data edge from this tree iff:
+            // - the tree and all internal edges to the right-most node are not shared.
+            // - the right-most node is a FLAT node and not shared.
+            // - the right-most node has at least the desired extra capacity.
+            //
+            // Returns {tree, nullptr} if any of the above conditions are not met.
+            // This method effectively removes data from the tree. The intent of this
+            // method is to allow applications appending small string data to use
+            // pre-existing capacity, and add the modified rep back to the tree.
+            //
+            // Simplified such code would look similar to this:
+            // void MyTreeBuilder::Append(string_view data) {
+            // ExtractResult result = CordRepBtree::ExtractAppendBuffer(tree_, 1);
+            // if (CordRep* rep = result.extracted) {
+            // size_t available = rep->Capacity() - rep->length;
+            // size_t n = std::min(data.size(), n);
+            // memcpy(rep->Data(), data.data(), n);
+            // rep->length += n;
+            // data.remove_prefix(n);
+            // if (!result.tree->IsBtree()) {
+            // tree_ = CordRepBtree::Create(result.tree);
+            // }
+            // tree_ = CordRepBtree::Append(tree_, rep);
+            // }
+            // ...
+            // // Remaining edge in `result.tree`.
+            // }
+            static ExtractResult ExtractAppendBuffer(CordRepBtree* tree, size_t extra_capacity = 1);
+
+            // Returns the `height` of the tree. The height of a tree is limited to
+            // kMaxHeight. `height` is implemented as an `int` as in some places we
+            // use negative (-1) values for 'data edges'.
+            int height() const
+            {
+                return static_cast<int>(storage[0]);
+            }
+
+            // Properties: begin, back, end, front/back boundary indexes.
+            size_t begin() const
+            {
+                return static_cast<size_t>(storage[1]);
+            }
+            size_t back() const
+            {
+                return static_cast<size_t>(storage[2]) - 1;
+            }
+            size_t end() const
+            {
+                return static_cast<size_t>(storage[2]);
+            }
+            size_t index(EdgeType edge) const
+            {
+                return edge == kFront ? begin() : back();
+            }
+
+            // Properties: size and capacity.
+            // `capacity` contains the current capacity of this instance, where
+            // `kMaxCapacity` contains the maximum capacity of a btree node.
+            // For now, `capacity` and `kMaxCapacity` return the same value, but this may
+            // change in the future if we see benefit in dynamically sizing 'small' nodes
+            // to 'large' nodes for large data trees.
+            size_t size() const
+            {
+                return end() - begin();
+            }
+            size_t capacity() const
+            {
+                return kMaxCapacity;
+            }
+
+            // Edge access
+            inline CordRep* Edge(size_t index) const;
+            inline CordRep* Edge(EdgeType edge_type) const;
+            inline absl::Span<CordRep* const> Edges() const;
+            inline absl::Span<CordRep* const> Edges(size_t begin, size_t end) const;
+
+            // Returns reference to the data edge at `index`.
+            // Requires this instance to be a leaf node, and `index` to be valid index.
+            inline absl::string_view Data(size_t index) const;
+
+            // Diagnostics: returns true if `tree` is valid and internally consistent.
+            // If `shallow` is false, then the provided top level node and all child nodes
+            // below it are recursively checked. If `shallow` is true, only the provided
+            // node in `tree` and the cumulative length, type and height of the direct
+            // child nodes of `tree` are checked. The value of `shallow` is ignored if the
+            // internal `cord_btree_exhaustive_validation` diagnostics variable is true,
+            // in which case the performed validations works as if `shallow` were false.
+            // This function is intended for debugging and testing purposes only.
+            static bool IsValid(const CordRepBtree* tree, bool shallow = false);
+
+            // Diagnostics: asserts that the provided tree is valid.
+            // `AssertValid()` performs a shallow validation by default.
`shallow` can be
+            // set to false in which case an exhaustive validation is performed. This
+            // function is implemented in terms of calling `IsValid()` and asserting the
+            // return value to be true. See `IsValid()` for more information.
+            // This function is intended for debugging and testing purposes only.
+            static CordRepBtree* AssertValid(CordRepBtree* tree, bool shallow = true);
+            static const CordRepBtree* AssertValid(const CordRepBtree* tree, bool shallow = true);
+
+            // Diagnostics: dump the contents of this tree to `stream`.
+            // This function is intended for debugging and testing purposes only.
+            static void Dump(const CordRep* rep, std::ostream& stream);
+            static void Dump(const CordRep* rep, absl::string_view label, std::ostream& stream);
+            static void Dump(const CordRep* rep, absl::string_view label, bool include_contents, std::ostream& stream);
+
+            // Adds the edge `edge` to this node if possible. `owned` indicates if the
+            // current node is potentially shared or not with other threads. Returns:
+            // - {kSelf, }
+            // The edge was directly added to this node.
+            // - {kCopied, }
+            // The edge was added to a copy of this node.
+            // - {kPopped, New(edge, height())}
+            // A new leg with the edge was created as this node has no extra capacity.
+            template <EdgeType edge_type>
+            inline OpResult AddEdge(bool owned, CordRep* edge, size_t delta);
+
+            // Replaces the front or back edge with the provided new edge. Returns:
+            // - {kSelf, }
+            // The edge was directly set in this node. The old edge is unreffed.
+            // - {kCopied, }
+            // A copy of this node was created with the new edge value.
+            // In both cases, the function adopts a reference on `edge`.
+            template <EdgeType edge_type>
+            OpResult SetEdge(bool owned, CordRep* edge, size_t delta);
+
+            // Creates a new empty node at the specified height.
+            static CordRepBtree* New(int height = 0);
+
+            // Creates a new node containing `rep`, with the height being computed
+            // automatically based on the type of `rep`.
+            static CordRepBtree* New(CordRep* rep);
+
+            // Creates a new node containing both `front` and `back` at height
+            // `front.height() + 1`. Requires `back.height() == front.height()`.
+            static CordRepBtree* New(CordRepBtree* front, CordRepBtree* back);
+
+            // Creates a fully balanced tree from the provided tree by rebuilding a new
+            // tree from all data edges in the input. This function is automatically
+            // invoked internally when the tree exceeds the maximum height.
+            static CordRepBtree* Rebuild(CordRepBtree* tree);
+
+        private:
+            CordRepBtree() = default;
+            ~CordRepBtree() = default;
+
+            // Initializes the main properties `tag`, `begin`, `end`, `height`.
+            inline void InitInstance(int height, size_t begin = 0, size_t end = 0);
+
+            // Direct property access begin / end
+            void set_begin(size_t begin)
+            {
+                storage[1] = static_cast<uint8_t>(begin);
+            }
+            void set_end(size_t end)
+            {
+                storage[2] = static_cast<uint8_t>(end);
+            }
+
+            // Decreases the value of `begin` by `n`, and returns the new value. Notice
+            // how this returns the new value unlike atomic::fetch_add which returns the
+            // old value. This is because this is used to prepend edges at 'begin - 1'.
+            size_t sub_fetch_begin(size_t n)
+            {
+                storage[1] -= static_cast<uint8_t>(n);
+                return storage[1];
+            }
+
+            // Increases the value of `end` by `n`, and returns the previous value. This
+            // function is typically used to append edges at 'end'.
+            size_t fetch_add_end(size_t n)
+            {
+                const uint8_t current = storage[2];
+                storage[2] = static_cast<uint8_t>(current + n);
+                return current;
+            }
+
+            // Returns the index of the last edge starting on, or before `offset`, with
+            // `n` containing the relative offset of `offset` inside that edge.
+            // Requires `offset` < length.
+            Position IndexOf(size_t offset) const;
+
+            // Returns the index of the last edge starting before `offset`, with `n`
+            // containing the relative offset of `offset` inside that edge.
+ // This function is useful to find the edges for some span of bytes ending at + // `offset` (i.e., `n` bytes). For example: + // + // Position pos = IndexBefore(n) + // edges = Edges(begin(), pos.index) // All full edges (may be empty) + // last = Sub(Edge(pos.index), 0, pos.n) // Last partial edge (may be empty) + // + // Requires 0 < `offset` <= length. + Position IndexBefore(size_t offset) const; + + // Returns the index of the edge ending at (or on) length `length`, and the + // number of bytes inside that edge up to `length`. For example, if we have a + // Node with 2 edges, one of 10 and one of 20 long, then IndexOfLength(27) + // will return {1, 17}, and IndexOfLength(10) will return {0, 10}. + Position IndexOfLength(size_t n) const; + + // Identical to the above function except starting from the position `front`. + // This function is equivalent to `IndexBefore(front.n + offset)`, with + // the difference that this function is optimized to start at `front.index`. + Position IndexBefore(Position front, size_t offset) const; + + // Returns the index of the edge directly beyond the edge containing offset + // `offset`, with `n` containing the distance of that edge from `offset`. + // This function is useful for iteratively finding suffix nodes and remaining + // partial bytes in left-most suffix nodes as for example in CopySuffix. + // Requires `offset` < length. + Position IndexBeyond(size_t offset) const; + + // Creates a new leaf node containing as much data as possible from `data`. + // The data is added either forwards or reversed depending on `edge_type`. + // Callers must check the length of the returned node to determine if all data + // was copied or not. + // See the `Append/Prepend` function for the meaning and purpose of `extra`. 
+            template <EdgeType edge_type>
+            static CordRepBtree* NewLeaf(absl::string_view data, size_t extra);
+
+            // Creates a raw copy of this Btree node with the specified length, copying
+            // all properties, but without adding any references to existing edges.
+            CordRepBtree* CopyRaw(size_t new_length) const;
+
+            // Creates a full copy of this Btree node, adding a reference on all edges.
+            CordRepBtree* Copy() const;
+
+            // Creates a partial copy of this Btree node, copying all edges up to `end`,
+            // adding a reference on each copied edge, and sets the length of the newly
+            // created copy to `new_length`.
+            CordRepBtree* CopyBeginTo(size_t end, size_t new_length) const;
+
+            // Returns a tree containing the edges [tree->begin(), end) and length
+            // of `new_length`. This method consumes a reference on the provided
+            // tree, and logically performs the following operation:
+            // result = tree->CopyBeginTo(end, new_length);
+            // CordRep::Unref(tree);
+            // return result;
+            static CordRepBtree* ConsumeBeginTo(CordRepBtree* tree, size_t end, size_t new_length);
+
+            // Creates a partial copy of this Btree node, copying all edges starting at
+            // `begin`, adding a reference on each copied edge, and sets the length of
+            // the newly created copy to `new_length`.
+            CordRepBtree* CopyToEndFrom(size_t begin, size_t new_length) const;
+
+            // Extracts and returns the front edge from the provided tree.
+            // This method consumes a reference on the provided tree, and logically
+            // performs the following operation:
+            // edge = CordRep::Ref(tree->Edge(kFront));
+            // CordRep::Unref(tree);
+            // return edge;
+            static CordRep* ExtractFront(CordRepBtree* tree);
+
+            // Returns a tree containing the result of appending `right` to `left`.
+            static CordRepBtree* MergeTrees(CordRepBtree* left, CordRepBtree* right);
+
+            // Fallback functions for `Create()`, `Append()` and `Prepend()` which
+            // deal with legacy / non conforming input, i.e.: CONCAT trees.
+            static CordRepBtree* CreateSlow(CordRep* rep);
+            static CordRepBtree* AppendSlow(CordRepBtree*, CordRep* rep);
+            static CordRepBtree* PrependSlow(CordRepBtree*, CordRep* rep);
+
+            // Recursively rebuilds `tree` into `stack`. If 'consume` is set to true, the
+            // function will consume a reference on `tree`. `stack` is a null terminated
+            // array containing the new tree's state, with the current leaf node at
+            // stack[0], and parent nodes above that, or null for 'top of tree'.
+            static void Rebuild(CordRepBtree** stack, CordRepBtree* tree, bool consume);
+
+            // Aligns existing edges to start at index 0, to allow for a new edge to be
+            // added to the back of the current edges.
+            inline void AlignBegin();
+
+            // Aligns existing edges to end at `capacity`, to allow for a new edge to be
+            // added in front of the current edges.
+            inline void AlignEnd();
+
+            // Adds the provided edge to this node.
+            // Requires this node to have capacity for the edge. Realigns / moves
+            // existing edges as needed to prepend or append the new edge.
+            template <EdgeType edge_type>
+            inline void Add(CordRep* rep);
+
+            // Adds the provided edges to this node.
+            // Requires this node to have capacity for the edges. Realigns / moves
+            // existing edges as needed to prepend or append the new edges.
+            template <EdgeType edge_type>
+            inline void Add(absl::Span<CordRep* const>);
+
+            // Adds data from `data` to this node until either all data has been consumed,
+            // or there is no more capacity for additional flat nodes inside this node.
+            // Requires the current node to be a leaf node, data to be non empty, and the
+            // current node to have capacity for at least one more data edge.
+            // Returns any remaining data from `data` that was not added, which is
+            // depending on the edge type (front / back) either the remaining prefix of
+            // suffix of the input.
+            // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+            template <EdgeType edge_type>
+            absl::string_view AddData(absl::string_view data, size_t extra);
+
+            // Replace the front or back edge with the provided value.
+            // Adopts a reference on `edge` and unrefs the old edge.
+            template <EdgeType edge_type>
+            inline void SetEdge(CordRep* edge);
+
+            // Returns a partial copy of the current tree containing the first `n` bytes
+            // of data. `CopyResult` contains both the resulting edge and its height. The
+            // resulting tree may be less high than the current tree, or even be a single
+            // matching data edge if `allow_folding` is set to true.
+            // For example, if `n == 1`, then the result will be the single data edge, and
+            // height will be set to -1 (one below the owning leaf node). If n == 0, this
+            // function returns null. Requires `n <= length`
+            CopyResult CopyPrefix(size_t n, bool allow_folding = true);
+
+            // Returns a partial copy of the current tree containing all data starting
+            // after `offset`. `CopyResult` contains both the resulting edge and its
+            // height. The resulting tree may be less high than the current tree, or even
+            // be a single matching data edge. For example, if `n == length - 1`, then the
+            // result will be a single data edge, and height will be set to -1 (one below
+            // the owning leaf node).
+            // Requires `offset < length`
+            CopyResult CopySuffix(size_t offset);
+
+            // Returns a OpResult value of {this, kSelf} or {Copy(), kCopied}
+            // depending on the value of `owned`.
+            inline OpResult ToOpResult(bool owned);
+
+            // Adds `rep` to the specified tree, returning the modified tree.
+            template <EdgeType edge_type>
+            static CordRepBtree* AddCordRep(CordRepBtree* tree, CordRep* rep);
+
+            // Adds `data` to the specified tree, returning the modified tree.
+            // See the `Append/Prepend` function for the meaning and purpose of `extra`.
+            template <EdgeType edge_type>
+            static CordRepBtree* AddData(CordRepBtree* tree, absl::string_view data, size_t extra = 0);
+
+            // Merges `src` into `dst` with `src` being added either before (kFront) or
+            // after (kBack) `dst`.
Requires the height of `dst` to be greater than or + // equal to the height of `src`. + template + static CordRepBtree* Merge(CordRepBtree* dst, CordRepBtree* src); + + // Fallback version of GetAppendBuffer for large trees: GetAppendBuffer() + // implements an inlined version for trees of limited height (3 levels), + // GetAppendBufferSlow implements the logic for large trees. + Span GetAppendBufferSlow(size_t size); + + // `edges_` contains all edges starting from this instance. + // These are explicitly `child` edges only, a cord btree (or any cord tree in + // that respect) does not store `parent` pointers anywhere: multiple trees / + // parents can reference the same shared child edge. The type of these edges + // depends on the height of the node. `Leaf nodes` (height == 0) contain `data + // edges` (external or flat nodes, or sub-strings thereof). All other nodes + // (height > 0) contain pointers to BTREE nodes with a height of `height - 1`. + CordRep* edges_[kMaxCapacity]; + + friend class CordRepBtreeTestPeer; + friend class CordRepBtreeNavigator; + }; + + inline CordRepBtree* CordRep::btree() + { + assert(IsBtree()); + return static_cast(this); + } + + inline const CordRepBtree* CordRep::btree() const + { + assert(IsBtree()); + return static_cast(this); + } + + inline void CordRepBtree::InitInstance(int height, size_t begin, size_t end) + { + tag = BTREE; + storage[0] = static_cast(height); + storage[1] = static_cast(begin); + storage[2] = static_cast(end); + } + + inline CordRep* CordRepBtree::Edge(size_t index) const + { + assert(index >= begin()); + assert(index < end()); + return edges_[index]; + } + + inline CordRep* CordRepBtree::Edge(EdgeType edge_type) const + { + return edges_[edge_type == kFront ? 
begin() : back()]; + } + + inline absl::Span CordRepBtree::Edges() const + { + return {edges_ + begin(), size()}; + } + + inline absl::Span CordRepBtree::Edges(size_t begin, size_t end) const + { + assert(begin <= end); + assert(begin >= this->begin()); + assert(end <= this->end()); + return {edges_ + begin, static_cast(end - begin)}; + } + + inline absl::string_view CordRepBtree::Data(size_t index) const + { + assert(height() == 0); + return EdgeData(Edge(index)); + } + + inline CordRepBtree* CordRepBtree::New(int height) + { + CordRepBtree* tree = new CordRepBtree; + tree->length = 0; + tree->InitInstance(height); + return tree; + } + + inline CordRepBtree* CordRepBtree::New(CordRep* rep) + { + CordRepBtree* tree = new CordRepBtree; + int height = rep->IsBtree() ? rep->btree()->height() + 1 : 0; + tree->length = rep->length; + tree->InitInstance(height, /*begin=*/0, /*end=*/1); + tree->edges_[0] = rep; + return tree; + } + + inline CordRepBtree* CordRepBtree::New(CordRepBtree* front, CordRepBtree* back) + { + assert(front->height() == back->height()); + CordRepBtree* tree = new CordRepBtree; + tree->length = front->length + back->length; + tree->InitInstance(front->height() + 1, /*begin=*/0, /*end=*/2); + tree->edges_[0] = front; + tree->edges_[1] = back; + return tree; + } + + inline void CordRepBtree::Unref(absl::Span edges) + { + for (CordRep* edge : edges) + { + if (ABSL_PREDICT_FALSE(!edge->refcount.Decrement())) + { + CordRep::Destroy(edge); + } + } + } + + inline CordRepBtree* CordRepBtree::CopyRaw(size_t new_length) const + { + CordRepBtree* tree = new CordRepBtree; + + // `length` and `refcount` are the first members of `CordRepBtree`. 
+ // We initialize `length` using the given length, have `refcount` be set to + // ref = 1 through its default constructor, and copy all data beyond + // 'refcount' which starts with `tag` using a single memcpy: all contents + // except `refcount` is trivially copyable, and the compiler does not + // efficiently coalesce member-wise copy of these members. + // See https://gcc.godbolt.org/z/qY8zsca6z + // # LINT.IfChange(copy_raw) + tree->length = new_length; + uint8_t* dst = &tree->tag; + const uint8_t* src = &tag; + const ptrdiff_t offset = src - reinterpret_cast(this); + memcpy(dst, src, sizeof(CordRepBtree) - static_cast(offset)); + return tree; + // # LINT.ThenChange() + } + + inline CordRepBtree* CordRepBtree::Copy() const + { + CordRepBtree* tree = CopyRaw(length); + for (CordRep* rep : Edges()) + CordRep::Ref(rep); + return tree; + } + + inline CordRepBtree* CordRepBtree::CopyToEndFrom(size_t begin, size_t new_length) const + { + assert(begin >= this->begin()); + assert(begin <= this->end()); + CordRepBtree* tree = CopyRaw(new_length); + tree->set_begin(begin); + for (CordRep* edge : tree->Edges()) + CordRep::Ref(edge); + return tree; + } + + inline CordRepBtree* CordRepBtree::CopyBeginTo(size_t end, size_t new_length) const + { + assert(end <= capacity()); + assert(end >= this->begin()); + CordRepBtree* tree = CopyRaw(new_length); + tree->set_end(end); + for (CordRep* edge : tree->Edges()) + CordRep::Ref(edge); + return tree; + } + + inline void CordRepBtree::AlignBegin() + { + // The below code itself does not need to be fast as typically we have + // mono-directional append/prepend calls, and `begin` / `end` are typically + // adjusted no more than once. But we want to avoid potential register clobber + // effects, making the compiler emit register save/store/spills, and minimize + // the size of code. 
+ const size_t delta = begin(); + if (ABSL_PREDICT_FALSE(delta != 0)) + { + const size_t new_end = end() - delta; + set_begin(0); + set_end(new_end); + // TODO(mvels): we can write this using 2 loads / 2 stores depending on + // total size for the kMaxCapacity = 6 case. I.e., we can branch (switch) on + // size, and then do overlapping load/store of up to 4 pointers (inlined as + // XMM, YMM or ZMM load/store) and up to 2 pointers (XMM / YMM), which is a) + // compact and b) not clobbering any registers. + ABSL_ASSUME(new_end <= kMaxCapacity); +#ifdef __clang__ +#pragma unroll 1 +#endif + for (size_t i = 0; i < new_end; ++i) + { + edges_[i] = edges_[i + delta]; + } + } + } + + inline void CordRepBtree::AlignEnd() + { + // See comments in `AlignBegin` for motivation on the hand-rolled for loops. + const size_t delta = capacity() - end(); + if (delta != 0) + { + const size_t new_begin = begin() + delta; + const size_t new_end = end() + delta; + set_begin(new_begin); + set_end(new_end); + ABSL_ASSUME(new_end <= kMaxCapacity); +#ifdef __clang__ +#pragma unroll 1 +#endif + for (size_t i = new_end - 1; i >= new_begin; --i) + { + edges_[i] = edges_[i - delta]; + } + } + } + + template<> + inline void CordRepBtree::Add(CordRep* rep) + { + AlignBegin(); + edges_[fetch_add_end(1)] = rep; + } + + template<> + inline void CordRepBtree::Add( + absl::Span edges + ) + { + AlignBegin(); + size_t new_end = end(); + for (CordRep* edge : edges) + edges_[new_end++] = edge; + set_end(new_end); + } + + template<> + inline void CordRepBtree::Add(CordRep* rep) + { + AlignEnd(); + edges_[sub_fetch_begin(1)] = rep; + } + + template<> + inline void CordRepBtree::Add( + absl::Span edges + ) + { + AlignEnd(); + size_t new_begin = begin() - edges.size(); + set_begin(new_begin); + for (CordRep* edge : edges) + edges_[new_begin++] = edge; + } + + template + inline void CordRepBtree::SetEdge(CordRep* edge) + { + const int idx = edge_type == kFront ? 
begin() : back(); + CordRep::Unref(edges_[idx]); + edges_[idx] = edge; + } + + inline CordRepBtree::OpResult CordRepBtree::ToOpResult(bool owned) + { + return owned ? OpResult{this, kSelf} : OpResult{Copy(), kCopied}; + } + + inline CordRepBtree::Position CordRepBtree::IndexOf(size_t offset) const + { + assert(offset < length); + size_t index = begin(); + while (offset >= edges_[index]->length) + offset -= edges_[index++]->length; + return {index, offset}; + } + + inline CordRepBtree::Position CordRepBtree::IndexBefore(size_t offset) const + { + assert(offset > 0); + assert(offset <= length); + size_t index = begin(); + while (offset > edges_[index]->length) + offset -= edges_[index++]->length; + return {index, offset}; + } + + inline CordRepBtree::Position CordRepBtree::IndexBefore(Position front, size_t offset) const + { + size_t index = front.index; + offset = offset + front.n; + while (offset > edges_[index]->length) + offset -= edges_[index++]->length; + return {index, offset}; + } + + inline CordRepBtree::Position CordRepBtree::IndexOfLength(size_t n) const + { + assert(n <= length); + size_t index = back(); + size_t strip = length - n; + while (strip >= edges_[index]->length) + strip -= edges_[index--]->length; + return {index, edges_[index]->length - strip}; + } + + inline CordRepBtree::Position CordRepBtree::IndexBeyond( + const size_t offset + ) const + { + // We need to find the edge which `starting offset` is beyond (>=)`offset`. + // For this we can't use the `offset -= length` logic of IndexOf. Instead, we + // track the offset of the `current edge` in `off`, which we increase as we + // iterate over the edges until we find the matching edge. 
+ size_t off = 0; + size_t index = begin(); + while (offset > off) + off += edges_[index++]->length; + return {index, off - offset}; + } + + inline CordRepBtree* CordRepBtree::Create(CordRep* rep) + { + if (IsDataEdge(rep)) + return New(rep); + return CreateSlow(rep); + } + + inline Span CordRepBtree::GetAppendBuffer(size_t size) + { + assert(refcount.IsOne()); + CordRepBtree* tree = this; + const int height = this->height(); + CordRepBtree* n1 = tree; + CordRepBtree* n2 = tree; + CordRepBtree* n3 = tree; + switch (height) + { + case 3: + tree = tree->Edge(kBack)->btree(); + if (!tree->refcount.IsOne()) + return {}; + n2 = tree; + ABSL_FALLTHROUGH_INTENDED; + case 2: + tree = tree->Edge(kBack)->btree(); + if (!tree->refcount.IsOne()) + return {}; + n1 = tree; + ABSL_FALLTHROUGH_INTENDED; + case 1: + tree = tree->Edge(kBack)->btree(); + if (!tree->refcount.IsOne()) + return {}; + ABSL_FALLTHROUGH_INTENDED; + case 0: + CordRep* edge = tree->Edge(kBack); + if (!edge->refcount.IsOne()) + return {}; + if (edge->tag < FLAT) + return {}; + size_t avail = edge->flat()->Capacity() - edge->length; + if (avail == 0) + return {}; + size_t delta = (std::min)(size, avail); + Span span = {edge->flat()->Data() + edge->length, delta}; + edge->length += delta; + switch (height) + { + case 3: + n3->length += delta; + ABSL_FALLTHROUGH_INTENDED; + case 2: + n2->length += delta; + ABSL_FALLTHROUGH_INTENDED; + case 1: + n1->length += delta; + ABSL_FALLTHROUGH_INTENDED; + case 0: + tree->length += delta; + return span; + } + break; + } + return GetAppendBufferSlow(size); + } + + extern template CordRepBtree* CordRepBtree::AddCordRep( + CordRepBtree* tree, CordRep* rep + ); + + extern template CordRepBtree* CordRepBtree::AddCordRep( + CordRepBtree* tree, CordRep* rep + ); + + inline CordRepBtree* CordRepBtree::Append(CordRepBtree* tree, CordRep* rep) + { + if (ABSL_PREDICT_TRUE(IsDataEdge(rep))) + { + return CordRepBtree::AddCordRep(tree, rep); + } + return AppendSlow(tree, rep); + } + + 
inline CordRepBtree* CordRepBtree::Prepend(CordRepBtree* tree, CordRep* rep) + { + if (ABSL_PREDICT_TRUE(IsDataEdge(rep))) + { + return CordRepBtree::AddCordRep(tree, rep); + } + return PrependSlow(tree, rep); + } + +#ifdef NDEBUG + + inline CordRepBtree* CordRepBtree::AssertValid(CordRepBtree* tree, bool /* shallow */) + { + return tree; + } + + inline const CordRepBtree* CordRepBtree::AssertValid(const CordRepBtree* tree, bool /* shallow */) + { + return tree; + } + +#endif + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_navigator.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_navigator.h new file mode 100644 index 00000000..1a5a5de7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_navigator.h @@ -0,0 +1,301 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_ + +#include +#include + +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_btree.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordRepBtreeNavigator is a bi-directional navigator allowing callers to + // navigate all the (leaf) data edges in a CordRepBtree instance. 
+ // + // A CordRepBtreeNavigator instance is by default empty. Callers initialize a + // navigator instance by calling one of `InitFirst()`, `InitLast()` or + // `InitOffset()`, which establishes a current position. Callers can then + // navigate using the `Next`, `Previous`, `Skip` and `Seek` methods. + // + // The navigator instance does not take or adopt a reference on the provided + // `tree` on any of the initialization calls. Callers are responsible for + // guaranteeing the lifecycle of the provided tree. A navigator instance can + // be reset to the empty state by calling `Reset`. + // + // A navigator only keeps positional state on the 'current data edge', it does + // explicitly not keep any 'offset' state. The class does accept and return + // offsets in the `Read()`, `Skip()` and 'Seek()` methods as these would + // otherwise put a big burden on callers. Callers are expected to maintain + // (returned) offset info if they require such granular state. + class CordRepBtreeNavigator + { + public: + // The logical position as returned by the Seek() and Skip() functions. + // Returns the current leaf edge for the desired seek or skip position and + // the offset of that position inside that edge. + struct Position + { + CordRep* edge; + size_t offset; + }; + + // The read result as returned by the Read() function. + // `tree` contains the resulting tree which is identical to the result + // of calling CordRepBtree::SubTree(...) on the tree being navigated. + // `n` contains the number of bytes used from the last navigated to + // edge of the tree. + struct ReadResult + { + CordRep* tree; + size_t n; + }; + + // Returns true if this instance is not empty. + explicit operator bool() const; + + // Returns the tree for this instance or nullptr if empty. + CordRepBtree* btree() const; + + // Returns the data edge of the current position. + // Requires this instance to not be empty. 
+ CordRep* Current() const; + + // Resets this navigator to `tree`, returning the first data edge in the tree. + CordRep* InitFirst(CordRepBtree* tree); + + // Resets this navigator to `tree`, returning the last data edge in the tree. + CordRep* InitLast(CordRepBtree* tree); + + // Resets this navigator to `tree` returning the data edge at position + // `offset` and the relative offset of `offset` into that data edge. + // Returns `Position.edge = nullptr` if the provided offset is greater + // than or equal to the length of the tree, in which case the state of + // the navigator instance remains unchanged. + Position InitOffset(CordRepBtree* tree, size_t offset); + + // Navigates to the next data edge. + // Returns the next data edge or nullptr if there is no next data edge, in + // which case the current position remains unchanged. + CordRep* Next(); + + // Navigates to the previous data edge. + // Returns the previous data edge or nullptr if there is no previous data + // edge, in which case the current position remains unchanged. + CordRep* Previous(); + + // Navigates to the data edge at position `offset`. Returns the navigated to + // data edge in `Position.edge` and the relative offset of `offset` into that + // data edge in `Position.offset`. Returns `Position.edge = nullptr` if the + // provide offset is greater than or equal to the tree's length. + Position Seek(size_t offset); + + // Reads `n` bytes of data starting at offset `edge_offset` of the current + // data edge, and returns the result in `ReadResult.tree`. `ReadResult.n` + // contains the 'bytes used` from the last / current data edge in the tree. + // This allows users that mix regular navigation (using string views) and + // 'read into cord' navigation to keep track of the current state, and which + // bytes have been consumed from a navigator. + // This function returns `ReadResult.tree = nullptr` if the requested length + // exceeds the length of the tree starting at the current data edge. 
+ ReadResult Read(size_t edge_offset, size_t n); + + // Skips `n` bytes forward from the current data edge, returning the navigated + // to data edge in `Position.edge` and `Position.offset` containing the offset + // inside that data edge. Note that the state of the navigator is left + // unchanged if `n` is smaller than the length of the current data edge. + Position Skip(size_t n); + + // Resets this instance to the default / empty state. + void Reset(); + + private: + // Slow path for Next() if Next() reached the end of a leaf node. Backtracks + // up the stack until it finds a node that has a 'next' position available, + // and then does a 'front dive' towards the next leaf node. + CordRep* NextUp(); + + // Slow path for Previous() if Previous() reached the beginning of a leaf + // node. Backtracks up the stack until it finds a node that has a 'previous' + // position available, and then does a 'back dive' towards the previous leaf + // node. + CordRep* PreviousUp(); + + // Generic implementation of InitFirst() and InitLast(). + template + CordRep* Init(CordRepBtree* tree); + + // `height_` contains the height of the current tree, or -1 if empty. + int height_ = -1; + + // `index_` and `node_` contain the navigation state as the 'path' to the + // current data edge which is at `node_[0]->Edge(index_[0])`. The contents + // of these are undefined until the instance is initialized (`height_ >= 0`). + uint8_t index_[CordRepBtree::kMaxDepth]; + CordRepBtree* node_[CordRepBtree::kMaxDepth]; + }; + + // Returns true if this instance is not empty. + inline CordRepBtreeNavigator::operator bool() const + { + return height_ >= 0; + } + + inline CordRepBtree* CordRepBtreeNavigator::btree() const + { + return height_ >= 0 ? 
node_[height_] : nullptr; + } + + inline CordRep* CordRepBtreeNavigator::Current() const + { + assert(height_ >= 0); + return node_[0]->Edge(index_[0]); + } + + inline void CordRepBtreeNavigator::Reset() + { + height_ = -1; + } + + inline CordRep* CordRepBtreeNavigator::InitFirst(CordRepBtree* tree) + { + return Init(tree); + } + + inline CordRep* CordRepBtreeNavigator::InitLast(CordRepBtree* tree) + { + return Init(tree); + } + + template + inline CordRep* CordRepBtreeNavigator::Init(CordRepBtree* tree) + { + assert(tree != nullptr); + assert(tree->size() > 0); + assert(tree->height() <= CordRepBtree::kMaxHeight); + int height = height_ = tree->height(); + size_t index = tree->index(edge_type); + node_[height] = tree; + index_[height] = static_cast(index); + while (--height >= 0) + { + tree = tree->Edge(index)->btree(); + node_[height] = tree; + index = tree->index(edge_type); + index_[height] = static_cast(index); + } + return node_[0]->Edge(index); + } + + inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::Seek( + size_t offset + ) + { + assert(btree() != nullptr); + int height = height_; + CordRepBtree* edge = node_[height]; + if (ABSL_PREDICT_FALSE(offset >= edge->length)) + return {nullptr, 0}; + CordRepBtree::Position index = edge->IndexOf(offset); + index_[height] = static_cast(index.index); + while (--height >= 0) + { + edge = edge->Edge(index.index)->btree(); + node_[height] = edge; + index = edge->IndexOf(index.n); + index_[height] = static_cast(index.index); + } + return {edge->Edge(index.index), index.n}; + } + + inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::InitOffset( + CordRepBtree* tree, size_t offset + ) + { + assert(tree != nullptr); + assert(tree->height() <= CordRepBtree::kMaxHeight); + if (ABSL_PREDICT_FALSE(offset >= tree->length)) + return {nullptr, 0}; + height_ = tree->height(); + node_[height_] = tree; + return Seek(offset); + } + + inline CordRep* CordRepBtreeNavigator::Next() + { + CordRepBtree* edge = 
node_[0]; + return index_[0] == edge->back() ? NextUp() : edge->Edge(++index_[0]); + } + + inline CordRep* CordRepBtreeNavigator::Previous() + { + CordRepBtree* edge = node_[0]; + return index_[0] == edge->begin() ? PreviousUp() : edge->Edge(--index_[0]); + } + + inline CordRep* CordRepBtreeNavigator::NextUp() + { + assert(index_[0] == node_[0]->back()); + CordRepBtree* edge; + size_t index; + int height = 0; + do + { + if (++height > height_) + return nullptr; + edge = node_[height]; + index = index_[height] + 1; + } while (index == edge->end()); + index_[height] = static_cast(index); + do + { + node_[--height] = edge = edge->Edge(index)->btree(); + index_[height] = static_cast(index = edge->begin()); + } while (height > 0); + return edge->Edge(index); + } + + inline CordRep* CordRepBtreeNavigator::PreviousUp() + { + assert(index_[0] == node_[0]->begin()); + CordRepBtree* edge; + size_t index; + int height = 0; + do + { + if (++height > height_) + return nullptr; + edge = node_[height]; + index = index_[height]; + } while (index == edge->begin()); + index_[height] = static_cast(--index); + do + { + node_[--height] = edge = edge->Edge(index)->btree(); + index_[height] = static_cast(index = edge->back()); + } while (height > 0); + return edge->Edge(index); + } + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_NAVIGATOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_reader.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_reader.h new file mode 100644 index 00000000..78e96379 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_reader.h @@ -0,0 +1,238 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_ + +#include + +#include "absl/base/config.h" +#include "absl/strings/internal/cord_data_edge.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_btree_navigator.h" +#include "absl/strings/internal/cord_rep_flat.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordRepBtreeReader implements logic to iterate over cord btrees. + // References to the underlying data are returned as absl::string_view values. + // The most typical use case is a forward only iteration over tree data. + // The class also provides `Skip()`, `Seek()` and `Read()` methods similar to + // CordRepBtreeNavigator that allow more advanced navigation. + // + // Example: iterate over all data inside a cord btree: + // + // CordRepBtreeReader reader; + // for (string_view sv = reader.Init(tree); !sv.Empty(); sv = sv.Next()) { + // DoSomethingWithDataIn(sv); + // } + // + // All navigation methods always return the next 'chunk' of data. The class + // assumes that all data is directly 'consumed' by the caller. For example: + // invoking `Skip()` will skip the desired number of bytes, and directly + // read and return the next chunk of data directly after the skipped bytes. 
+ // + // Example: iterate over all data inside a btree skipping the first 100 bytes: + // + // CordRepBtreeReader reader; + // absl::string_view sv = reader.Init(tree); + // if (sv.length() > 100) { + // sv.RemovePrefix(100); + // } else { + // sv = reader.Skip(100 - sv.length()); + // } + // while (!sv.empty()) { + // DoSomethingWithDataIn(sv); + // absl::string_view sv = reader.Next(); + // } + // + // It is important to notice that `remaining` is based on the end position of + // the last data edge returned to the caller, not the cumulative data returned + // to the caller which can be less in cases of skipping or seeking over data. + // + // For example, consider a cord btree with five data edges: "abc", "def", "ghi", + // "jkl" and "mno": + // + // absl::string_view sv; + // CordRepBtreeReader reader; + // + // sv = reader.Init(tree); // sv = "abc", remaining = 12 + // sv = reader.Skip(4); // sv = "hi", remaining = 6 + // sv = reader.Skip(2); // sv = "l", remaining = 3 + // sv = reader.Next(); // sv = "mno", remaining = 0 + // sv = reader.Seek(1); // sv = "bc", remaining = 12 + // + class CordRepBtreeReader + { + public: + using ReadResult = CordRepBtreeNavigator::ReadResult; + using Position = CordRepBtreeNavigator::Position; + + // Returns true if this instance is not empty. + explicit operator bool() const + { + return navigator_.btree() != nullptr; + } + + // Returns the tree referenced by this instance or nullptr if empty. + CordRepBtree* btree() const + { + return navigator_.btree(); + } + + // Returns the current data edge inside the referenced btree. + // Requires that the current instance is not empty. + CordRep* node() const + { + return navigator_.Current(); + } + + // Returns the length of the referenced tree. + // Requires that the current instance is not empty. + size_t length() const; + + // Returns the number of remaining bytes available for iteration, which is the + // number of bytes directly following the end of the last chunk returned. 
+ // This value will be zero if we iterated over the last edge in the bound + // tree, in which case any call to Next() or Skip() will return an empty + // string_view reflecting the EOF state. + // Note that a call to `Seek()` resets `remaining` to a value based on the + // end position of the chunk returned by that call. + size_t remaining() const + { + return remaining_; + } + + // Resets this instance to an empty value. + void Reset() + { + navigator_.Reset(); + } + + // Initializes this instance with `tree`. `tree` must not be null. + // Returns a reference to the first data edge of the provided tree. + absl::string_view Init(CordRepBtree* tree); + + // Navigates to and returns the next data edge of the referenced tree. + // Returns an empty string_view if an attempt is made to read beyond the end + // of the tree, i.e.: if `remaining()` is zero indicating an EOF condition. + // Requires that the current instance is not empty. + absl::string_view Next(); + + // Skips the provided amount of bytes and returns a reference to the data + // directly following the skipped bytes. + absl::string_view Skip(size_t skip); + + // Reads `n` bytes into `tree`. + // If `chunk_size` is zero, starts reading at the next data edge. If + // `chunk_size` is non zero, the read starts at the last `chunk_size` bytes of + // the last returned data edge. Effectively, this means that the read starts + // at offset `consumed() - chunk_size`. + // Requires that `chunk_size` is less than or equal to the length of the + // last returned data edge. The purpose of `chunk_size` is to simplify code + // partially consuming a returned chunk and wanting to include the remaining + // bytes in the Read call. 
For example, the below code will read 1000 bytes of + // data into a cord tree if the first chunk starts with "big:": + // + // CordRepBtreeReader reader; + // absl::string_view sv = reader.Init(tree); + // if (absl::StartsWith(sv, "big:")) { + // CordRepBtree tree; + // sv = reader.Read(1000, sv.size() - 4 /* "big:" */, &tree); + // } + // + // This method will return an empty string view if all remaining data was + // read. If `n` exceeded the amount of remaining data this function will + // return an empty string view and `tree` will be set to nullptr. + // In both cases, `consumed` will be set to `length`. + absl::string_view Read(size_t n, size_t chunk_size, CordRep*& tree); + + // Navigates to the chunk at offset `offset`. + // Returns a reference into the navigated to chunk, adjusted for the relative + // position of `offset` into that chunk. For example, calling `Seek(13)` on a + // cord tree containing 2 chunks of 10 and 20 bytes respectively will return + // a string view into the second chunk starting at offset 3 with a size of 17. + // Returns an empty string view if `offset` is equal to or greater than the + // length of the referenced tree. 
+ absl::string_view Seek(size_t offset); + + private: + size_t remaining_ = 0; + CordRepBtreeNavigator navigator_; + }; + + inline size_t CordRepBtreeReader::length() const + { + assert(btree() != nullptr); + return btree()->length; + } + + inline absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) + { + assert(tree != nullptr); + const CordRep* edge = navigator_.InitFirst(tree); + remaining_ = tree->length - edge->length; + return EdgeData(edge); + } + + inline absl::string_view CordRepBtreeReader::Next() + { + if (remaining_ == 0) + return {}; + const CordRep* edge = navigator_.Next(); + assert(edge != nullptr); + remaining_ -= edge->length; + return EdgeData(edge); + } + + inline absl::string_view CordRepBtreeReader::Skip(size_t skip) + { + // As we are always positioned on the last 'consumed' edge, we + // need to skip the current edge as well as `skip`. + const size_t edge_length = navigator_.Current()->length; + CordRepBtreeNavigator::Position pos = navigator_.Skip(skip + edge_length); + if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) + { + remaining_ = 0; + return {}; + } + // The combined length of all edges skipped before `pos.edge` is `skip - + // pos.offset`, all of which are 'consumed', as well as the current edge. 
+ remaining_ -= skip - pos.offset + pos.edge->length; + return EdgeData(pos.edge).substr(pos.offset); + } + + inline absl::string_view CordRepBtreeReader::Seek(size_t offset) + { + const CordRepBtreeNavigator::Position pos = navigator_.Seek(offset); + if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) + { + remaining_ = 0; + return {}; + } + absl::string_view chunk = EdgeData(pos.edge).substr(pos.offset); + remaining_ = length() - offset - chunk.length(); + return chunk; + } + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_consume.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_consume.h new file mode 100644 index 00000000..dacd4b0b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_consume.h @@ -0,0 +1,47 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_ + +#include + +#include "absl/functional/function_ref.h" +#include "absl/strings/internal/cord_internal.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // Consume() and ReverseConsume() consume CONCAT based trees and invoke the + // provided functor with the contained nodes in the proper forward or reverse + // order, which is used to convert CONCAT trees into other tree or cord data. + // All CONCAT and SUBSTRING nodes are processed internally. The 'offset` + // parameter of the functor is non-zero for any nodes below SUBSTRING nodes. + // It's up to the caller to form these back into SUBSTRING nodes or otherwise + // store offset / prefix information. These functions are intended to be used + // only for migration / transitional code where due to factors such as ODR + // violations, we can not 100% guarantee that all code respects 'new format' + // settings and flags, so we need to be able to parse old data on the fly until + // all old code is deprecated / no longer the default format. + void Consume(CordRep* rep, FunctionRef consume_fn); + void ReverseConsume(CordRep* rep, FunctionRef consume_fn); + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_CONSUME_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_crc.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_crc.h new file mode 100644 index 00000000..a78afd3a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_crc.h @@ -0,0 +1,121 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/crc/internal/crc_cord_state.h" +#include "absl/strings/internal/cord_internal.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordRepCrc is a CordRep node intended only to appear at the top level of a + // cord tree. It associates an "expected CRC" with the contained data, to allow + // for easy passage of checksum data in Cord data flows. + // + // From Cord's perspective, the crc value has no semantics; any validation of + // the contained checksum is the user's responsibility. + struct CordRepCrc : public CordRep + { + CordRep* child; + absl::crc_internal::CrcCordState crc_cord_state; + + // Consumes `child` and returns a CordRepCrc prefixed tree containing `child`. + // If the specified `child` is itself a CordRepCrc node, then this method + // either replaces the existing node, or directly updates the crc state in it + // depending on the node being shared or not, i.e.: refcount.IsOne(). + // `child` must only be null if the Cord is empty. Never returns null. + static CordRepCrc* New(CordRep* child, crc_internal::CrcCordState state); + + // Destroys (deletes) the provided node. `node` must not be null. + static void Destroy(CordRepCrc* node); + }; + + // Consumes `rep` and returns a CordRep* with any outer CordRepCrc wrapper + // removed. 
This is usually a no-op (returning `rep`), but this will remove and + // unref an outer CordRepCrc node. + inline CordRep* RemoveCrcNode(CordRep* rep) + { + assert(rep != nullptr); + if (ABSL_PREDICT_FALSE(rep->IsCrc())) + { + CordRep* child = rep->crc()->child; + if (rep->refcount.IsOne()) + { + delete rep->crc(); + } + else + { + CordRep::Ref(child); + CordRep::Unref(rep); + } + return child; + } + return rep; + } + + // Returns `rep` if it is not a CordRepCrc node, or its child if it is. + // Does not consume or create a reference on `rep` or the returned value. + inline CordRep* SkipCrcNode(CordRep* rep) + { + assert(rep != nullptr); + if (ABSL_PREDICT_FALSE(rep->IsCrc())) + { + return rep->crc()->child; + } + else + { + return rep; + } + } + + inline const CordRep* SkipCrcNode(const CordRep* rep) + { + assert(rep != nullptr); + if (ABSL_PREDICT_FALSE(rep->IsCrc())) + { + return rep->crc()->child; + } + else + { + return rep; + } + } + + inline CordRepCrc* CordRep::crc() + { + assert(IsCrc()); + return static_cast(this); + } + + inline const CordRepCrc* CordRep::crc() const + { + assert(IsCrc()); + return static_cast(this); + } + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_flat.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_flat.h new file mode 100644 index 00000000..59693b83 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_flat.h @@ -0,0 +1,222 @@ +// Copyright 2020 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/strings/internal/cord_internal.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // Note: all constants below are never ODR used and internal to cord, we define + // these as static constexpr to avoid 'in struct' definition and usage clutter. + + // Largest and smallest flat node lengths we are willing to allocate + // Flat allocation size is stored in tag, which currently can encode sizes up + // to 4K, encoded as multiple of either 8 or 32 bytes. + // If we allow for larger sizes, we need to change this to 8/64, 16/128, etc. + // kMinFlatSize is bounded by tag needing to be at least FLAT * 8 bytes, and + // ideally a 'nice' size aligning with allocation and cacheline sizes like 32. + // kMaxFlatSize is bounded by the size resulting in a computed tag no greater + // than MAX_FLAT_TAG. MAX_FLAT_TAG provides for additional 'high' tag values. 
+ static constexpr size_t kFlatOverhead = offsetof(CordRep, storage); + static constexpr size_t kMinFlatSize = 32; + static constexpr size_t kMaxFlatSize = 4096; + static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead; + static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead; + static constexpr size_t kMaxLargeFlatSize = 256 * 1024; + static constexpr size_t kMaxLargeFlatLength = kMaxLargeFlatSize - kFlatOverhead; + + // kTagBase should make the Size <--> Tag computation resilient + // against changes to the value of FLAT when we add a new tag.. + static constexpr uint8_t kTagBase = FLAT - 4; + + // Converts the provided rounded size to the corresponding tag + constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) + { + return static_cast(size <= 512 ? kTagBase + size / 8 : size <= 8192 ? kTagBase + 512 / 8 + size / 64 - 512 / 64 : + kTagBase + 512 / 8 + ((8192 - 512) / 64) + size / 4096 - 8192 / 4096); + } + + // Converts the provided tag to the corresponding allocated size + constexpr size_t TagToAllocatedSize(uint8_t tag) + { + return (tag <= kTagBase + 512 / 8) ? tag * 8 - kTagBase * 8 : (tag <= kTagBase + (512 / 8) + ((8192 - 512) / 64)) ? 512 + tag * 64 - kTagBase * 64 - 512 / 8 * 64 : + 8192 + tag * 4096 - kTagBase * 4096 - ((512 / 8) + ((8192 - 512) / 64)) * 4096; + } + + static_assert(AllocatedSizeToTagUnchecked(kMinFlatSize) == FLAT, ""); + static_assert(AllocatedSizeToTagUnchecked(kMaxLargeFlatSize) == MAX_FLAT_TAG, ""); + + // RoundUp logically performs `((n + m - 1) / m) * m` to round up to the nearest + // multiple of `m`, optimized for the invariant that `m` is a power of 2. + constexpr size_t RoundUp(size_t n, size_t m) + { + return (n + m - 1) & (0 - m); + } + + // Returns the size to the nearest equal or larger value that can be + // expressed exactly as a tag value. + inline size_t RoundUpForTag(size_t size) + { + return RoundUp(size, (size <= 512) ? 8 : (size <= 8192 ? 
64 : 4096)); + } + + // Converts the allocated size to a tag, rounding down if the size + // does not exactly match a 'tag expressible' size value. The result is + // undefined if the size exceeds the maximum size that can be encoded in + // a tag, i.e., if size is larger than TagToAllocatedSize(). + inline uint8_t AllocatedSizeToTag(size_t size) + { + const uint8_t tag = AllocatedSizeToTagUnchecked(size); + assert(tag <= MAX_FLAT_TAG); + return tag; + } + + // Converts the provided tag to the corresponding available data length + constexpr size_t TagToLength(uint8_t tag) + { + return TagToAllocatedSize(tag) - kFlatOverhead; + } + + // Enforce that kMaxFlatSize maps to a well-known exact tag value. + static_assert(TagToAllocatedSize(MAX_FLAT_TAG) == kMaxLargeFlatSize, "Bad tag logic"); + + struct CordRepFlat : public CordRep + { + // Tag for explicit 'large flat' allocation + struct Large + { + }; + + // Creates a new flat node. + template + static CordRepFlat* NewImpl(size_t len, Args... args ABSL_ATTRIBUTE_UNUSED) + { + if (len <= kMinFlatLength) + { + len = kMinFlatLength; + } + else if (len > max_flat_size - kFlatOverhead) + { + len = max_flat_size - kFlatOverhead; + } + + // Round size up so it matches a size we can exactly express in a tag. + const size_t size = RoundUpForTag(len + kFlatOverhead); + void* const raw_rep = ::operator new(size); +// GCC 13 has a false-positive -Wstringop-overflow warning here. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstringop-overflow" +#endif + CordRepFlat* rep = new (raw_rep) CordRepFlat(); + rep->tag = AllocatedSizeToTag(size); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0) +#pragma GCC diagnostic pop +#endif + return rep; + } + + static CordRepFlat* New(size_t len) + { + return NewImpl(len); + } + + static CordRepFlat* New(Large, size_t len) + { + return NewImpl(len); + } + + // Deletes a CordRepFlat instance created previously through a call to New(). 
+ // Flat CordReps are allocated and constructed with raw ::operator new and + // placement new, and must be destructed and deallocated accordingly. + static void Delete(CordRep* rep) + { + assert(rep->tag >= FLAT && rep->tag <= MAX_FLAT_TAG); + +#if defined(__cpp_sized_deallocation) + size_t size = TagToAllocatedSize(rep->tag); + rep->~CordRep(); + ::operator delete(rep, size); +#else + rep->~CordRep(); + ::operator delete(rep); +#endif + } + + // Create a CordRepFlat containing `data`, with an optional additional + // extra capacity of up to `extra` bytes. Requires that `data.size()` + // is less than kMaxFlatLength. + static CordRepFlat* Create(absl::string_view data, size_t extra = 0) + { + assert(data.size() <= kMaxFlatLength); + CordRepFlat* flat = New(data.size() + (std::min)(extra, kMaxFlatLength)); + memcpy(flat->Data(), data.data(), data.size()); + flat->length = data.size(); + return flat; + } + + // Returns a pointer to the data inside this flat rep. + char* Data() + { + return reinterpret_cast(storage); + } + const char* Data() const + { + return reinterpret_cast(storage); + } + + // Returns the maximum capacity (payload size) of this instance. + size_t Capacity() const + { + return TagToLength(tag); + } + + // Returns the allocated size (payload + overhead) of this instance. 
+ size_t AllocatedSize() const + { + return TagToAllocatedSize(tag); + } + }; + + // Now that CordRepFlat is defined, we can define CordRep's helper casts: + inline CordRepFlat* CordRep::flat() + { + assert(tag >= FLAT && tag <= MAX_FLAT_TAG); + return reinterpret_cast(this); + } + + inline const CordRepFlat* CordRep::flat() const + { + assert(tag >= FLAT && tag <= MAX_FLAT_TAG); + return reinterpret_cast(this); + } + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_FLAT_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring.h new file mode 100644 index 00000000..a528659f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring.h @@ -0,0 +1,657 @@ +// Copyright 2020 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/container/internal/layout.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_flat.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // All operations modifying a ring buffer are implemented as static methods + // requiring a CordRepRing instance with a reference adopted by the method. 
+ // + // The methods return the modified ring buffer, which may be equal to the input + // if the input was not shared, and having large enough capacity to accommodate + // any newly added node(s). Otherwise, a copy of the input rep with the new + // node(s) added is returned. + // + // Any modification on non shared ring buffers with enough capacity will then + // require minimum atomic operations. Caller should where possible provide + // reasonable `extra` hints for both anticipated extra `flat` byte space, as + // well as anticipated extra nodes required for complex operations. + // + // Example of code creating a ring buffer, adding some data to it, + // and discarding the buffer when done: + // + // void FunWithRings() { + // // Create ring with 3 flats + // CordRep* flat = CreateFlat("Hello"); + // CordRepRing* ring = CordRepRing::Create(flat, 2); + // ring = CordRepRing::Append(ring, CreateFlat(" ")); + // ring = CordRepRing::Append(ring, CreateFlat("world")); + // DoSomethingWithRing(ring); + // CordRep::Unref(ring); + // } + // + // Example of code Copying an existing ring buffer and modifying it: + // + // void MoreFunWithRings(CordRepRing* src) { + // CordRepRing* ring = CordRep::Ref(src)->ring(); + // ring = CordRepRing::Append(ring, CreateFlat("Hello")); + // ring = CordRepRing::Append(ring, CreateFlat(" ")); + // ring = CordRepRing::Append(ring, CreateFlat("world")); + // DoSomethingWithRing(ring); + // CordRep::Unref(ring); + // } + // + class CordRepRing : public CordRep + { + public: + // `pos_type` represents a 'logical position'. A CordRepRing instance has a + // `begin_pos` (default 0), and each node inside the buffer will have an + // `end_pos` which is the `end_pos` of the previous node (or `begin_pos`) plus + // this node's length. The purpose is to allow for a binary search on this + // position, while allowing O(1) prepend and append operations. 
+ using pos_type = size_t; + + // `index_type` is the type for the `head`, `tail` and `capacity` indexes. + // Ring buffers are limited to having no more than four billion entries. + using index_type = uint32_t; + + // `offset_type` is the type for the data offset inside a child rep's data. + using offset_type = uint32_t; + + // Position holds the node index and relative offset into the node for + // some physical offset in the contained data as returned by the Find() + // and FindTail() methods. + struct Position + { + index_type index; + size_t offset; + }; + + // The maximum # of child nodes that can be hosted inside a CordRepRing. + static constexpr size_t kMaxCapacity = (std::numeric_limits::max)(); + + // CordRepring can not be default constructed, moved, copied or assigned. + CordRepRing() = delete; + CordRepRing(const CordRepRing&) = delete; + CordRepRing& operator=(const CordRepRing&) = delete; + + // Returns true if this instance is valid, false if some or all of the + // invariants are broken. Intended for debug purposes only. + // `output` receives an explanation of the broken invariants. + bool IsValid(std::ostream& output) const; + + // Returns the size in bytes for a CordRepRing with `capacity' entries. + static constexpr size_t AllocSize(size_t capacity); + + // Returns the distance in bytes from `pos` to `end_pos`. + static constexpr size_t Distance(pos_type pos, pos_type end_pos); + + // Creates a new ring buffer from the provided `rep`. Adopts a reference + // on `rep`. The returned ring buffer has a capacity of at least `extra + 1` + static CordRepRing* Create(CordRep* child, size_t extra = 0); + + // `head`, `tail` and `capacity` indexes defining the ring buffer boundaries. + index_type head() const + { + return head_; + } + index_type tail() const + { + return tail_; + } + index_type capacity() const + { + return capacity_; + } + + // Returns the number of entries in this instance. 
+ index_type entries() const + { + return entries(head_, tail_); + } + + // Returns the logical begin position of this instance. + pos_type begin_pos() const + { + return begin_pos_; + } + + // Returns the number of entries for a given head-tail range. + // Requires `head` and `tail` values to be less than `capacity()`. + index_type entries(index_type head, index_type tail) const + { + assert(head < capacity_ && tail < capacity_); + return tail - head + ((tail > head) ? 0 : capacity_); + } + + // Returns the logical end position of entry `index`. + pos_type const& entry_end_pos(index_type index) const + { + assert(IsValidIndex(index)); + return Layout::Partial().Pointer<0>(data_)[index]; + } + + // Returns the child pointer of entry `index`. + CordRep* const& entry_child(index_type index) const + { + assert(IsValidIndex(index)); + return Layout::Partial(capacity()).Pointer<1>(data_)[index]; + } + + // Returns the data offset of entry `index` + offset_type const& entry_data_offset(index_type index) const + { + assert(IsValidIndex(index)); + return Layout::Partial(capacity(), capacity()).Pointer<2>(data_)[index]; + } + + // Appends the provided child node to the `rep` instance. + // Adopts a reference from `rep` and `child` which may not be null. + // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node + // containing a FLAT or EXTERNAL node, then flat or external the node is added + // 'as is', with an offset added for the SUBSTRING case. + // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING or + // CONCAT tree, then all child nodes not excluded by any start offset or + // length values are added recursively. + static CordRepRing* Append(CordRepRing* rep, CordRep* child); + + // Appends the provided string data to the `rep` instance. + // This function will attempt to utilize any remaining capacity in the last + // node of the input if that node is not shared (directly or indirectly), and + // of type FLAT. 
Remaining data will be added as one or more FLAT nodes. + // Any last node added to the ring buffer will be allocated with up to + // `extra` bytes of capacity for (anticipated) subsequent append actions. + static CordRepRing* Append(CordRepRing* rep, string_view data, size_t extra = 0); + + // Prepends the provided child node to the `rep` instance. + // Adopts a reference from `rep` and `child` which may not be null. + // If the provided child is a FLAT or EXTERNAL node, or a SUBSTRING node + // containing a FLAT or EXTERNAL node, then flat or external the node is + // prepended 'as is', with an optional offset added for the SUBSTRING case. + // If the provided child is a RING or CONCAT tree, or a SUBSTRING of a RING + // or CONCAT tree, then all child nodes not excluded by any start offset or + // length values are added recursively. + static CordRepRing* Prepend(CordRepRing* rep, CordRep* child); + + // Prepends the provided string data to the `rep` instance. + // This function will attempt to utilize any remaining capacity in the first + // node of the input if that node is not shared (directly or indirectly), and + // of type FLAT. Remaining data will be added as one or more FLAT nodes. + // Any first node prepnded to the ring buffer will be allocated with up to + // `extra` bytes of capacity for (anticipated) subsequent prepend actions. + static CordRepRing* Prepend(CordRepRing* rep, string_view data, size_t extra = 0); + + // Returns a span referencing potentially unused capacity in the last node. + // The returned span may be empty if no such capacity is available, or if the + // current instance is shared. Else, a span of size `n <= size` is returned. + // If non empty, the ring buffer is adjusted to the new length, with the newly + // added capacity left uninitialized. Callers should assign a value to the + // entire span before any other operations on this instance. 
+ Span GetAppendBuffer(size_t size); + + // Returns a span referencing potentially unused capacity in the first node. + // This function is identical to GetAppendBuffer except that it returns a span + // referencing up to `size` capacity directly before the existing data. + Span GetPrependBuffer(size_t size); + + // Returns a cord ring buffer containing `len` bytes of data starting at + // `offset`. If the input is not shared, this function will remove all head + // and tail child nodes outside of the requested range, and adjust the new + // head and tail nodes as required. If the input is shared, this function + // returns a new instance sharing some or all of the nodes from the input. + static CordRepRing* SubRing(CordRepRing* r, size_t offset, size_t len, size_t extra = 0); + + // Returns a cord ring buffer with the first `len` bytes removed. + // If the input is not shared, this function will remove all head child nodes + // fully inside the first `length` bytes, and adjust the new head as required. + // If the input is shared, this function returns a new instance sharing some + // or all of the nodes from the input. + static CordRepRing* RemoveSuffix(CordRepRing* r, size_t len, size_t extra = 0); + + // Returns a cord ring buffer with the last `len` bytes removed. + // If the input is not shared, this function will remove all head child nodes + // fully inside the first `length` bytes, and adjust the new head as required. + // If the input is shared, this function returns a new instance sharing some + // or all of the nodes from the input. + static CordRepRing* RemovePrefix(CordRepRing* r, size_t len, size_t extra = 0); + + // Returns the character at `offset`. Requires that `offset < length`. + char GetCharacter(size_t offset) const; + + // Returns true if this instance manages a single contiguous buffer, in which + // case the (optional) output parameter `fragment` is set. Otherwise, the + // function returns false, and `fragment` is left unchanged. 
+ bool IsFlat(absl::string_view* fragment) const; + + // Returns true if the data starting at `offset` with length `len` is + // managed by this instance inside a single contiguous buffer, in which case + // the (optional) output parameter `fragment` is set to the contiguous memory + // starting at offset `offset` with length `length`. Otherwise, the function + // returns false, and `fragment` is left unchanged. + bool IsFlat(size_t offset, size_t len, absl::string_view* fragment) const; + + // Testing only: set capacity to requested capacity. + void SetCapacityForTesting(size_t capacity); + + // Returns the CordRep data pointer for the provided CordRep. + // Requires that the provided `rep` is either a FLAT or EXTERNAL CordRep. + static const char* GetLeafData(const CordRep* rep); + + // Returns the CordRep data pointer for the provided CordRep. + // Requires that `rep` is either a FLAT, EXTERNAL, or SUBSTRING CordRep. + static const char* GetRepData(const CordRep* rep); + + // Advances the provided position, wrapping around capacity as needed. + // Requires `index` < capacity() + inline index_type advance(index_type index) const; + + // Advances the provided position by 'n`, wrapping around capacity as needed. + // Requires `index` < capacity() and `n` <= capacity. + inline index_type advance(index_type index, index_type n) const; + + // Retreats the provided position, wrapping around 0 as needed. + // Requires `index` < capacity() + inline index_type retreat(index_type index) const; + + // Retreats the provided position by 'n', wrapping around 0 as needed. + // Requires `index` < capacity() + inline index_type retreat(index_type index, index_type n) const; + + // Returns the logical begin position of entry `index` + pos_type const& entry_begin_pos(index_type index) const + { + return (index == head_) ? 
begin_pos_ : entry_end_pos(retreat(index)); + } + + // Returns the physical start offset of entry `index` + size_t entry_start_offset(index_type index) const + { + return Distance(begin_pos_, entry_begin_pos(index)); + } + + // Returns the physical end offset of entry `index` + size_t entry_end_offset(index_type index) const + { + return Distance(begin_pos_, entry_end_pos(index)); + } + + // Returns the data length for entry `index` + size_t entry_length(index_type index) const + { + return Distance(entry_begin_pos(index), entry_end_pos(index)); + } + + // Returns the data for entry `index` + absl::string_view entry_data(index_type index) const; + + // Returns the position for `offset` as {index, prefix}. `index` holds the + // index of the entry at the specified offset and `prefix` holds the relative + // offset inside that entry. + // Requires `offset` < length. + // + // For example we can implement GetCharacter(offset) as: + // char GetCharacter(size_t offset) { + // Position pos = this->Find(offset); + // return this->entry_data(pos.pos)[pos.offset]; + // } + inline Position Find(size_t offset) const; + + // Find starting at `head` + inline Position Find(index_type head, size_t offset) const; + + // Returns the tail position for `offset` as {tail index, suffix}. + // `tail index` holds holds the index of the entry holding the offset directly + // before 'offset` advanced by one. 'suffix` holds the relative offset from + // that relative offset in the entry to the end of the entry. + // For example, FindTail(length) will return {tail(), 0}, FindTail(length - 5) + // will return {retreat(tail), 5)} provided the preceding entry contains at + // least 5 bytes of data. + // Requires offset >= 1 && offset <= length. + // + // This function is very useful in functions that need to clip the end of some + // ring buffer such as 'RemovePrefix'. 
+ // For example, we could implement RemovePrefix for non shared instances as: + // void RemoveSuffix(size_t n) { + // Position pos = FindTail(length - n); + // UnrefEntries(pos.pos, this->tail_); + // this->tail_ = pos.pos; + // entry(retreat(pos.pos)).end_pos -= pos.offset; + // } + inline Position FindTail(size_t offset) const; + + // Find tail starting at `head` + inline Position FindTail(index_type head, size_t offset) const; + + // Invokes f(index_type index) for each entry inside the range [head, tail> + template + void ForEach(index_type head, index_type tail, F&& f) const + { + index_type n1 = (tail > head) ? tail : capacity_; + for (index_type i = head; i < n1; ++i) + f(i); + if (tail <= head) + { + for (index_type i = 0; i < tail; ++i) + f(i); + } + } + + // Invokes f(index_type index) for each entry inside this instance. + template + void ForEach(F&& f) const + { + ForEach(head_, tail_, std::forward(f)); + } + + // Dump this instance's data tp stream `s` in human readable format, excluding + // the actual data content itself. Intended for debug purposes only. + friend std::ostream& operator<<(std::ostream& s, const CordRepRing& rep); + + private: + enum class AddMode + { + kAppend, + kPrepend + }; + + using Layout = container_internal::Layout; + + class Filler; + class Transaction; + class CreateTransaction; + + static constexpr size_t kLayoutAlignment = Layout::Partial().Alignment(); + + // Creates a new CordRepRing. + explicit CordRepRing(index_type capacity) : + capacity_(capacity) + { + } + + // Returns true if `index` is a valid index into this instance. + bool IsValidIndex(index_type index) const; + + // Debug use only: validates the provided CordRepRing invariants. + // Verification of all CordRepRing methods can be enabled by defining + // EXTRA_CORD_RING_VALIDATION, i.e.: `--copts=-DEXTRA_CORD_RING_VALIDATION` + // Verification is VERY expensive, so only do it for debugging purposes. 
+ static CordRepRing* Validate(CordRepRing* rep, const char* file = nullptr, int line = 0); + + // Allocates a CordRepRing large enough to hold `capacity + extra' entries. + // The returned capacity may be larger if the allocated memory allows for it. + // The maximum capacity of a CordRepRing is capped at kMaxCapacity. + // Throws `std::length_error` if `capacity + extra' exceeds kMaxCapacity. + static CordRepRing* New(size_t capacity, size_t extra); + + // Deallocates (but does not destroy) the provided ring buffer. + static void Delete(CordRepRing* rep); + + // Destroys the provided ring buffer, decrementing the reference count of all + // contained child CordReps. The provided 1\`rep` should have a ref count of + // one (pre decrement destroy call observing `refcount.IsOne()`) or zero + // (post decrement destroy call observing `!refcount.Decrement()`). + static void Destroy(CordRepRing* rep); + + // Returns a mutable reference to the logical end position array. + pos_type* entry_end_pos() + { + return Layout::Partial().Pointer<0>(data_); + } + + // Returns a mutable reference to the child pointer array. + CordRep** entry_child() + { + return Layout::Partial(capacity()).Pointer<1>(data_); + } + + // Returns a mutable reference to the data offset array. + offset_type* entry_data_offset() + { + return Layout::Partial(capacity(), capacity()).Pointer<2>(data_); + } + + // Find implementations for the non fast path 0 / length cases. + Position FindSlow(index_type head, size_t offset) const; + Position FindTailSlow(index_type head, size_t offset) const; + + // Finds the index of the first node that is inside a reasonable distance + // of the node at `offset` from which we can continue with a linear search. + template + index_type FindBinary(index_type head, index_type tail, size_t offset) const; + + // Fills the current (initialized) instance from the provided source, copying + // entries [head, tail). Adds a reference to copied entries if `ref` is true. 
+ template + void Fill(const CordRepRing* src, index_type head, index_type tail); + + // Create a copy of 'rep', copying all entries [head, tail), allocating room + // for `extra` entries. Adds a reference on all copied entries. + static CordRepRing* Copy(CordRepRing* rep, index_type head, index_type tail, size_t extra = 0); + + // Returns a Mutable CordRepRing reference from `rep` with room for at least + // `extra` additional nodes. Adopts a reference count from `rep`. + // This function will return `rep` if, and only if: + // - rep.entries + extra <= rep.capacity + // - rep.refcount == 1 + // Otherwise, this function will create a new copy of `rep` with additional + // capacity to satisfy `extra` extra nodes, and unref the old `rep` instance. + // + // If a new CordRepRing can not be allocated, or the new capacity would exceed + // the maximum capacity, then the input is consumed only, and an exception is + // thrown. + static CordRepRing* Mutable(CordRepRing* rep, size_t extra); + + // Slow path for Append(CordRepRing* rep, CordRep* child). This function is + // exercised if the provided `child` in Append() is not a leaf node, i.e., a + // ring buffer or old (concat) cord tree. + static CordRepRing* AppendSlow(CordRepRing* rep, CordRep* child); + + // Appends the provided leaf node. Requires `child` to be FLAT or EXTERNAL. + static CordRepRing* AppendLeaf(CordRepRing* rep, CordRep* child, size_t offset, size_t length); + + // Prepends the provided leaf node. Requires `child` to be FLAT or EXTERNAL. + static CordRepRing* PrependLeaf(CordRepRing* rep, CordRep* child, size_t offset, size_t length); + + // Slow path for Prepend(CordRepRing* rep, CordRep* child). This function is + // exercised if the provided `child` in Prepend() is not a leaf node, i.e., a + // ring buffer or old (concat) cord tree. + static CordRepRing* PrependSlow(CordRepRing* rep, CordRep* child); + + // Slow path for Create(CordRep* child, size_t extra). 
This function is + // exercised if the provided `child` in Prepend() is not a leaf node, i.e., a + // ring buffer or old (concat) cord tree. + static CordRepRing* CreateSlow(CordRep* child, size_t extra); + + // Creates a new ring buffer from the provided `child` leaf node. Requires + // `child` to be FLAT or EXTERNAL. on `rep`. + // The returned ring buffer has a capacity of at least `1 + extra` + static CordRepRing* CreateFromLeaf(CordRep* child, size_t offset, size_t length, size_t extra); + + // Appends or prepends (depending on AddMode) the ring buffer in `ring' to + // `rep` starting at `offset` with length `len`. + template + static CordRepRing* AddRing(CordRepRing* rep, CordRepRing* ring, size_t offset, size_t len); + + // Increases the data offset for entry `index` by `n`. + void AddDataOffset(index_type index, size_t n); + + // Decreases the length for entry `index` by `n`. + void SubLength(index_type index, size_t n); + + index_type head_; + index_type tail_; + index_type capacity_; + pos_type begin_pos_; + + alignas(kLayoutAlignment) char data_[kLayoutAlignment]; + + friend struct CordRep; + }; + + constexpr size_t CordRepRing::AllocSize(size_t capacity) + { + return sizeof(CordRepRing) - sizeof(data_) + + Layout(capacity, capacity, capacity).AllocSize(); + } + + inline constexpr size_t CordRepRing::Distance(pos_type pos, pos_type end_pos) + { + return (end_pos - pos); + } + + inline const char* CordRepRing::GetLeafData(const CordRep* rep) + { + return rep->tag != EXTERNAL ? rep->flat()->Data() : rep->external()->base; + } + + inline const char* CordRepRing::GetRepData(const CordRep* rep) + { + if (rep->tag >= FLAT) + return rep->flat()->Data(); + if (rep->tag == EXTERNAL) + return rep->external()->base; + return GetLeafData(rep->substring()->child) + rep->substring()->start; + } + + inline CordRepRing::index_type CordRepRing::advance(index_type index) const + { + assert(index < capacity_); + return ++index == capacity_ ? 
0 : index; + } + + inline CordRepRing::index_type CordRepRing::advance(index_type index, index_type n) const + { + assert(index < capacity_ && n <= capacity_); + return (index += n) >= capacity_ ? index - capacity_ : index; + } + + inline CordRepRing::index_type CordRepRing::retreat(index_type index) const + { + assert(index < capacity_); + return (index > 0 ? index : capacity_) - 1; + } + + inline CordRepRing::index_type CordRepRing::retreat(index_type index, index_type n) const + { + assert(index < capacity_ && n <= capacity_); + return index >= n ? index - n : capacity_ - n + index; + } + + inline absl::string_view CordRepRing::entry_data(index_type index) const + { + size_t data_offset = entry_data_offset(index); + return {GetRepData(entry_child(index)) + data_offset, entry_length(index)}; + } + + inline bool CordRepRing::IsValidIndex(index_type index) const + { + if (index >= capacity_) + return false; + return (tail_ > head_) ? (index >= head_ && index < tail_) : (index >= head_ || index < tail_); + } + +#ifndef EXTRA_CORD_RING_VALIDATION + inline CordRepRing* CordRepRing::Validate(CordRepRing* rep, const char* /*file*/, int /*line*/) + { + return rep; + } +#endif + + inline CordRepRing::Position CordRepRing::Find(size_t offset) const + { + assert(offset < length); + return (offset == 0) ? Position{head_, 0} : FindSlow(head_, offset); + } + + inline CordRepRing::Position CordRepRing::Find(index_type head, size_t offset) const + { + assert(offset < length); + assert(IsValidIndex(head) && offset >= entry_start_offset(head)); + return (offset == 0) ? Position{head_, 0} : FindSlow(head, offset); + } + + inline CordRepRing::Position CordRepRing::FindTail(size_t offset) const + { + assert(offset > 0 && offset <= length); + return (offset == length) ? 
Position{tail_, 0} : FindTailSlow(head_, offset); + } + + inline CordRepRing::Position CordRepRing::FindTail(index_type head, size_t offset) const + { + assert(offset > 0 && offset <= length); + assert(IsValidIndex(head) && offset >= entry_start_offset(head) + 1); + return (offset == length) ? Position{tail_, 0} : FindTailSlow(head, offset); + } + + // Now that CordRepRing is defined, we can define CordRep's helper casts: + inline CordRepRing* CordRep::ring() + { + assert(IsRing()); + return static_cast(this); + } + + inline const CordRepRing* CordRep::ring() const + { + assert(IsRing()); + return static_cast(this); + } + + inline bool CordRepRing::IsFlat(absl::string_view* fragment) const + { + if (entries() == 1) + { + if (fragment) + *fragment = entry_data(head()); + return true; + } + return false; + } + + inline bool CordRepRing::IsFlat(size_t offset, size_t len, absl::string_view* fragment) const + { + const Position pos = Find(offset); + const absl::string_view data = entry_data(pos.index); + if (data.length() >= len && data.length() - len >= pos.offset) + { + if (fragment) + *fragment = data.substr(pos.offset, len); + return true; + } + return false; + } + + std::ostream& operator<<(std::ostream& s, const CordRepRing& rep); + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring_reader.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring_reader.h new file mode 100644 index 00000000..45e9af4b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring_reader.h @@ -0,0 +1,142 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_ + +#include +#include +#include + +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_ring.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordRepRingReader provides basic navigation over CordRepRing data. + class CordRepRingReader + { + public: + // Returns true if this instance is not empty. + explicit operator bool() const + { + return ring_ != nullptr; + } + + // Returns the ring buffer reference for this instance, or nullptr if empty. + CordRepRing* ring() const + { + return ring_; + } + + // Returns the current node index inside the ring buffer for this instance. + // The returned value is undefined if this instance is empty. + CordRepRing::index_type index() const + { + return index_; + } + + // Returns the current node inside the ring buffer for this instance. + // The returned value is undefined if this instance is empty. + CordRep* node() const + { + return ring_->entry_child(index_); + } + + // Returns the length of the referenced ring buffer. + // Requires the current instance to be non empty. + size_t length() const + { + assert(ring_); + return ring_->length; + } + + // Returns the end offset of the last navigated-to chunk, which represents the + // total bytes 'consumed' relative to the start of the ring. The returned + // value is never zero. 
For example, initializing a reader with a ring buffer + // with a first chunk of 19 bytes will return consumed() = 19. + // Requires the current instance to be non empty. + size_t consumed() const + { + assert(ring_); + return ring_->entry_end_offset(index_); + } + + // Returns the number of bytes remaining beyond the last navigated-to chunk. + // Requires the current instance to be non empty. + size_t remaining() const + { + assert(ring_); + return length() - consumed(); + } + + // Resets this instance to an empty value + void Reset() + { + ring_ = nullptr; + } + + // Resets this instance to the start of `ring`. `ring` must not be null. + // Returns a reference into the first chunk of the provided ring. + absl::string_view Reset(CordRepRing* ring) + { + assert(ring); + ring_ = ring; + index_ = ring_->head(); + return ring_->entry_data(index_); + } + + // Navigates to the next chunk inside the reference ring buffer. + // Returns a reference into the navigated-to chunk. + // Requires remaining() to be non zero. + absl::string_view Next() + { + assert(remaining()); + index_ = ring_->advance(index_); + return ring_->entry_data(index_); + } + + // Navigates to the chunk at offset `offset`. + // Returns a reference into the navigated-to chunk, adjusted for the relative + // position of `offset` into that chunk. For example, calling Seek(13) on a + // ring buffer containing 2 chunks of 10 and 20 bytes respectively will return + // a string view into the second chunk starting at offset 3 with a size of 17. + // Requires `offset` to be less than `length()` + absl::string_view Seek(size_t offset) + { + assert(offset < length()); + size_t current = ring_->entry_end_offset(index_); + CordRepRing::index_type hint = (offset >= current) ? 
index_ : ring_->head(); + const CordRepRing::Position head = ring_->Find(hint, offset); + index_ = head.index; + auto data = ring_->entry_data(head.index); + data.remove_prefix(head.offset); + return data; + } + + private: + CordRepRing* ring_ = nullptr; + CordRepRing::index_type index_; + }; + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_RING_READER_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_test_util.h b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_test_util.h new file mode 100644 index 00000000..98af8bdb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_test_util.h @@ -0,0 +1,250 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_ +#define ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cord_rep_btree.h" +#include "absl/strings/internal/cord_rep_flat.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cordrep_testing + { + + inline cord_internal::CordRepSubstring* MakeSubstring( + size_t start, size_t len, cord_internal::CordRep* rep + ) + { + auto* sub = new cord_internal::CordRepSubstring; + sub->tag = cord_internal::SUBSTRING; + sub->start = start; + sub->length = len <= 0 ? rep->length - start + len : len; + sub->child = rep; + return sub; + } + + inline cord_internal::CordRepFlat* MakeFlat(absl::string_view value) + { + assert(value.length() <= cord_internal::kMaxFlatLength); + auto* flat = cord_internal::CordRepFlat::New(value.length()); + flat->length = value.length(); + memcpy(flat->Data(), value.data(), value.length()); + return flat; + } + + // Creates an external node for testing + inline cord_internal::CordRepExternal* MakeExternal(absl::string_view s) + { + struct Rep : public cord_internal::CordRepExternal + { + std::string s; + explicit Rep(absl::string_view sv) : + s(sv) + { + this->tag = cord_internal::EXTERNAL; + this->base = s.data(); + this->length = s.length(); + this->releaser_invoker = [](cord_internal::CordRepExternal* self) + { + delete static_cast(self); + }; + } + }; + return new Rep(s); + } + + inline std::string CreateRandomString(size_t n) + { + absl::string_view data = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789~!@#$%^&*()_+=-<>?:\"{}[]|"; + std::minstd_rand rnd; + std::uniform_int_distribution dist(0, data.size() - 1); + std::string s(n, ' '); + for (size_t i = 0; i < n; ++i) + { + s[i] = data[dist(rnd)]; + } + return s; + } 
+ + // Creates an array of flats from the provided string, chopping + // the provided string up into flats of size `chunk_size` characters + // resulting in roughly `data.size() / chunk_size` total flats. + inline std::vector CreateFlatsFromString( + absl::string_view data, size_t chunk_size + ) + { + assert(chunk_size > 0); + std::vector flats; + for (absl::string_view s = data; !s.empty(); s.remove_prefix(chunk_size)) + { + flats.push_back(MakeFlat(s.substr(0, chunk_size))); + } + return flats; + } + + inline cord_internal::CordRepBtree* CordRepBtreeFromFlats( + absl::Span flats + ) + { + assert(!flats.empty()); + auto* node = cord_internal::CordRepBtree::Create(flats[0]); + for (size_t i = 1; i < flats.size(); ++i) + { + node = cord_internal::CordRepBtree::Append(node, flats[i]); + } + return node; + } + + template + inline void CordVisitReps(cord_internal::CordRep* rep, Fn&& fn) + { + fn(rep); + while (rep->tag == cord_internal::SUBSTRING) + { + rep = rep->substring()->child; + fn(rep); + } + if (rep->tag == cord_internal::BTREE) + { + for (cord_internal::CordRep* edge : rep->btree()->Edges()) + { + CordVisitReps(edge, fn); + } + } + } + + template + inline std::vector CordCollectRepsIf( + Predicate&& predicate, cord_internal::CordRep* rep + ) + { + std::vector reps; + CordVisitReps(rep, [&reps, &predicate](cord_internal::CordRep* rep) + { + if (predicate(rep)) reps.push_back(rep); }); + return reps; + } + + inline std::vector CordCollectReps( + cord_internal::CordRep* rep + ) + { + std::vector reps; + auto fn = [&reps](cord_internal::CordRep* rep) + { reps.push_back(rep); }; + CordVisitReps(rep, fn); + return reps; + } + + inline void CordToString(cord_internal::CordRep* rep, std::string& s) + { + size_t offset = 0; + size_t length = rep->length; + while (rep->tag == cord_internal::SUBSTRING) + { + offset += rep->substring()->start; + rep = rep->substring()->child; + } + if (rep->tag == cord_internal::BTREE) + { + for (cord_internal::CordRep* edge : 
rep->btree()->Edges()) + { + CordToString(edge, s); + } + } + else if (rep->tag >= cord_internal::FLAT) + { + s.append(rep->flat()->Data() + offset, length); + } + else if (rep->tag == cord_internal::EXTERNAL) + { + s.append(rep->external()->base + offset, length); + } + else + { + ABSL_RAW_LOG(FATAL, "Unsupported tag %d", rep->tag); + } + } + + inline std::string CordToString(cord_internal::CordRep* rep) + { + std::string s; + s.reserve(rep->length); + CordToString(rep, s); + return s; + } + + // RAII Helper class to automatically unref reps on destruction. + class AutoUnref + { + public: + ~AutoUnref() + { + for (CordRep* rep : unrefs_) + CordRep::Unref(rep); + } + + // Adds `rep` to the list of reps to be unreffed at destruction. + template + CordRepType* Add(CordRepType* rep) + { + unrefs_.push_back(rep); + return rep; + } + + // Increments the reference count of `rep` by one, and adds it to + // the list of reps to be unreffed at destruction. + template + CordRepType* Ref(CordRepType* rep) + { + unrefs_.push_back(CordRep::Ref(rep)); + return rep; + } + + // Increments the reference count of `rep` by one if `condition` is true, + // and adds it to the list of reps to be unreffed at destruction. + template + CordRepType* RefIf(bool condition, CordRepType* rep) + { + if (condition) + unrefs_.push_back(CordRep::Ref(rep)); + return rep; + } + + private: + using CordRep = absl::cord_internal::CordRep; + + std::vector unrefs_; + }; + + } // namespace cordrep_testing + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORD_REP_TEST_UTIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cordz_functions.h b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_functions.h new file mode 100644 index 00000000..49e70709 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_functions.h @@ -0,0 +1,86 @@ +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_ + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // Returns the current sample rate. This represents the average interval + // between samples. + int32_t get_cordz_mean_interval(); + + // Sets the sample rate with the average interval between samples. + void set_cordz_mean_interval(int32_t mean_interval); + +// Cordz is only enabled on Linux with thread_local support. +#if defined(ABSL_INTERNAL_CORDZ_ENABLED) +#error ABSL_INTERNAL_CORDZ_ENABLED cannot be set directly +#elif defined(__linux__) && defined(ABSL_HAVE_THREAD_LOCAL) +#define ABSL_INTERNAL_CORDZ_ENABLED 1 +#endif + +#ifdef ABSL_INTERNAL_CORDZ_ENABLED + + // cordz_next_sample is the number of events until the next sample event. If + // the value is 1 or less, the code will check on the next event if cordz is + // enabled, and if so, will sample the Cord. cordz is only enabled when we can + // use thread locals. + ABSL_CONST_INIT extern thread_local int64_t cordz_next_sample; + + // Determines if the next sample should be profiled. If it is, the value pointed + // at by next_sample will be set with the interval until the next sample. 
+ bool cordz_should_profile_slow(); + + // Returns true if the next cord should be sampled. + inline bool cordz_should_profile() + { + if (ABSL_PREDICT_TRUE(cordz_next_sample > 1)) + { + cordz_next_sample--; + return false; + } + return cordz_should_profile_slow(); + } + + // Sets the interval until the next sample (for testing only) + void cordz_set_next_sample_for_testing(int64_t next_sample); + +#else // ABSL_INTERNAL_CORDZ_ENABLED + + inline bool cordz_should_profile() + { + return false; + } + inline void cordz_set_next_sample_for_testing(int64_t) + { + } + +#endif // ABSL_INTERNAL_CORDZ_ENABLED + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cordz_handle.h b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_handle.h new file mode 100644 index 00000000..6ae73e11 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_handle.h @@ -0,0 +1,111 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // This base class allows multiple types of object (CordzInfo and + // CordzSampleToken) to exist simultaneously on the delete queue (pointed to by + // global_dq_tail and traversed using dq_prev_ and dq_next_). The + // delete queue guarantees that once a profiler creates a CordzSampleToken and + // has gained visibility into a CordzInfo object, that CordzInfo object will not + // be deleted prematurely. This allows the profiler to inspect all CordzInfo + // objects that are alive without needing to hold a global lock. + class ABSL_DLL CordzHandle + { + public: + CordzHandle() : + CordzHandle(false) + { + } + + bool is_snapshot() const + { + return is_snapshot_; + } + + // Returns true if this instance is safe to be deleted because it is either a + // snapshot, which is always safe to delete, or not included in the global + // delete queue and thus not included in any snapshot. + // Callers are responsible for making sure this instance can not be newly + // discovered by other threads. For example, CordzInfo instances first de-list + // themselves from the global CordzInfo list before determining if they are + // safe to be deleted directly. + // If SafeToDelete returns false, callers MUST use the Delete() method to + // safely queue CordzHandle instances for deletion. + bool SafeToDelete() const; + + // Deletes the provided instance, or puts it on the delete queue to be deleted + // once there are no more sample tokens (snapshot) instances potentially + // referencing the instance. `handle` should not be null. + static void Delete(CordzHandle* handle); + + // Returns the current entries in the delete queue in LIFO order. 
+ static std::vector DiagnosticsGetDeleteQueue(); + + // Returns true if the provided handle is nullptr or guarded by this handle. + // Since the CordzSnapshot token is itself a CordzHandle, this method will + // allow tests to check if that token is keeping an arbitrary CordzHandle + // alive. + bool DiagnosticsHandleIsSafeToInspect(const CordzHandle* handle) const; + + // Returns the current entries in the delete queue, in LIFO order, that are + // protected by this. CordzHandle objects are only placed on the delete queue + // after CordzHandle::Delete is called with them as an argument. Only + // CordzHandle objects that are not also CordzSnapshot objects will be + // included in the return vector. For each of the handles in the return + // vector, the earliest that their memory can be freed is when this + // CordzSnapshot object is deleted. + std::vector DiagnosticsGetSafeToInspectDeletedHandles(); + + protected: + explicit CordzHandle(bool is_snapshot); + virtual ~CordzHandle(); + + private: + const bool is_snapshot_; + + // dq_prev_ and dq_next_ require the global queue mutex to be held. + // Unfortunately we can't use thread annotations such that the thread safety + // analysis understands that queue_ and global_queue_ are one and the same. + CordzHandle* dq_prev_ = nullptr; + CordzHandle* dq_next_ = nullptr; + }; + + class CordzSnapshot : public CordzHandle + { + public: + CordzSnapshot() : + CordzHandle(true) + { + } + }; + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORDZ_HANDLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cordz_info.h b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_info.h new file mode 100644 index 00000000..1a18bf55 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_info.h @@ -0,0 +1,317 @@ +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/thread_annotations.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cordz_functions.h" +#include "absl/strings/internal/cordz_handle.h" +#include "absl/strings/internal/cordz_statistics.h" +#include "absl/strings/internal/cordz_update_tracker.h" +#include "absl/synchronization/mutex.h" +#include "absl/types/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordzInfo tracks a profiled Cord. Each of these objects can be in two places. + // If a Cord is alive, the CordzInfo will be in the global_cordz_infos map, and + // can also be retrieved via the linked list starting with + // global_cordz_infos_head and continued via the cordz_info_next() method. When + // a Cord has reached the end of its lifespan, the CordzInfo object will be + // migrated out of the global_cordz_infos list and the global_cordz_infos_map, + // and will either be deleted or appended to the global_delete_queue. If it is + // placed on the global_delete_queue, the CordzInfo object will be cleaned in + // the destructor of a CordzSampleToken object. 
+ class ABSL_LOCKABLE CordzInfo : public CordzHandle + { + public: + using MethodIdentifier = CordzUpdateTracker::MethodIdentifier; + + // TrackCord creates a CordzInfo instance which tracks important metrics of + // a sampled cord, and stores the created CordzInfo instance into `cord'. All + // CordzInfo instances are placed in a global list which is used to discover + // and snapshot all actively tracked cords. Callers are responsible for + // calling UntrackCord() before the tracked Cord instance is deleted, or to + // stop tracking the sampled Cord. Callers are also responsible for guarding + // changes to the 'tree' value of a Cord (InlineData.tree) through the Lock() + // and Unlock() calls. Any change resulting in a new tree value for the cord + // requires a call to SetCordRep() before the old tree has been unreffed + // and/or deleted. `method` identifies the Cord public API method initiating + // the cord to be sampled. + // Requires `cord` to hold a tree, and `cord.cordz_info()` to be null. + static void TrackCord(InlineData& cord, MethodIdentifier method); + + // Identical to TrackCord(), except that this function fills the + // `parent_stack` and `parent_method` properties of the returned CordzInfo + // instance from the provided `src` instance if `src` is sampled. + // This function should be used for sampling 'copy constructed' and 'copy + // assigned' cords. This function allows 'cord` to be already sampled, in + // which case the CordzInfo will be newly created from `src`. + static void TrackCord(InlineData& cord, const InlineData& src, MethodIdentifier method); + + // Maybe sample the cord identified by 'cord' for method 'method'. + // Uses `cordz_should_profile` to randomly pick cords to be sampled, and if + // so, invokes `TrackCord` to start sampling `cord`. + static void MaybeTrackCord(InlineData& cord, MethodIdentifier method); + + // Maybe sample the cord identified by 'cord' for method 'method'. 
+ // `src` identifies a 'parent' cord which is assigned to `cord`, typically the + // input cord for a copy constructor, or an assign method such as `operator=` + // `cord` will be sampled if (and only if) `src` is sampled. + // If `cord` is currently being sampled and `src` is not being sampled, then + // this function will stop sampling the cord and reset the cord's cordz_info. + // + // Previously this function defined that `cord` will be sampled if either + // `src` is sampled, or if `cord` is randomly picked for sampling. However, + // this can cause issues, as there may be paths where some cord is assigned an + // indirect copy of it's own value. As such a 'string of copies' would then + // remain sampled (`src.is_profiled`), then assigning such a cord back to + // 'itself' creates a cycle where the cord will converge to 'always sampled`. + // + // For example: + // + // Cord x; + // for (...) { + // // Copy ctor --> y.is_profiled := x.is_profiled | random(...) + // Cord y = x; + // ... + // // Assign x = y --> x.is_profiled = y.is_profiled | random(...) + // // ==> x.is_profiled |= random(...) + // // ==> x converges to 'always profiled' + // x = y; + // } + static void MaybeTrackCord(InlineData& cord, const InlineData& src, MethodIdentifier method); + + // Stops tracking changes for a sampled cord, and deletes the provided info. + // This function must be called before the sampled cord instance is deleted, + // and before the root cordrep of the sampled cord is unreffed. + // This function may extend the lifetime of the cordrep in cases where the + // CordInfo instance is being held by a concurrent collection thread. + void Untrack(); + + // Invokes UntrackCord() on `info` if `info` is not null. + static void MaybeUntrackCord(CordzInfo* info); + + CordzInfo() = delete; + CordzInfo(const CordzInfo&) = delete; + CordzInfo& operator=(const CordzInfo&) = delete; + + // Retrieves the oldest existing CordzInfo. 
+ static CordzInfo* Head(const CordzSnapshot& snapshot) + ABSL_NO_THREAD_SAFETY_ANALYSIS; + + // Retrieves the next oldest existing CordzInfo older than 'this' instance. + CordzInfo* Next(const CordzSnapshot& snapshot) const + ABSL_NO_THREAD_SAFETY_ANALYSIS; + + // Locks this instance for the update identified by `method`. + // Increases the count for `method` in `update_tracker`. + void Lock(MethodIdentifier method) ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_); + + // Unlocks this instance. If the contained `rep` has been set to null + // indicating the Cord has been cleared or is otherwise no longer sampled, + // then this method will delete this CordzInfo instance. + void Unlock() ABSL_UNLOCK_FUNCTION(mutex_); + + // Asserts that this CordzInfo instance is locked. + void AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_); + + // Updates the `rep` property of this instance. This methods is invoked by + // Cord logic each time the root node of a sampled Cord changes, and before + // the old root reference count is deleted. This guarantees that collection + // code can always safely take a reference on the tracked cord. + // Requires a lock to be held through the `Lock()` method. + // TODO(b/117940323): annotate with ABSL_EXCLUSIVE_LOCKS_REQUIRED once all + // Cord code is in a state where this can be proven true by the compiler. + void SetCordRep(CordRep* rep); + + // Returns the current `rep` property of this instance with a reference + // added, or null if this instance represents a cord that has since been + // deleted or untracked. + CordRep* RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_); + + // Returns the current value of `rep_` for testing purposes only. + CordRep* GetCordRepForTesting() const ABSL_NO_THREAD_SAFETY_ANALYSIS + { + return rep_; + } + + // Sets the current value of `rep_` for testing purposes only. 
+ void SetCordRepForTesting(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS + { + rep_ = rep; + } + + // Returns the stack trace for where the cord was first sampled. Cords are + // potentially sampled when they promote from an inlined cord to a tree or + // ring representation, which is not necessarily the location where the cord + // was first created. Some cords are created as inlined cords, and only as + // data is added do they become a non-inlined cord. However, typically the + // location represents reasonably well where the cord is 'created'. + absl::Span GetStack() const; + + // Returns the stack trace for a sampled cord's 'parent stack trace'. This + // value may be set if the cord is sampled (promoted) after being created + // from, or being assigned the value of an existing (sampled) cord. + absl::Span GetParentStack() const; + + // Retrieves the CordzStatistics associated with this Cord. The statistics + // are only updated when a Cord goes through a mutation, such as an Append + // or RemovePrefix. + CordzStatistics GetCordzStatistics() const; + + private: + using SpinLock = absl::base_internal::SpinLock; + using SpinLockHolder = ::absl::base_internal::SpinLockHolder; + + // Global cordz info list. CordzInfo stores a pointer to the global list + // instance to harden against ODR violations. + struct List + { + constexpr explicit List(absl::ConstInitType) : + mutex(absl::kConstInit, absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) + { + } + + SpinLock mutex; + std::atomic head ABSL_GUARDED_BY(mutex){nullptr}; + }; + + static constexpr size_t kMaxStackDepth = 64; + + explicit CordzInfo(CordRep* rep, const CordzInfo* src, MethodIdentifier method); + ~CordzInfo() override; + + // Sets `rep_` without holding a lock. + void UnsafeSetCordRep(CordRep* rep) ABSL_NO_THREAD_SAFETY_ANALYSIS; + + void Track(); + + // Returns the parent method from `src`, which is either `parent_method_` or + // `method_` depending on `parent_method_` being kUnknown. 
+ // Returns kUnknown if `src` is null. + static MethodIdentifier GetParentMethod(const CordzInfo* src); + + // Fills the provided stack from `src`, copying either `parent_stack_` or + // `stack_` depending on `parent_stack_` being empty, returning the size of + // the parent stack. + // Returns 0 if `src` is null. + static size_t FillParentStack(const CordzInfo* src, void** stack); + + void ODRCheck() const + { +#ifndef NDEBUG + ABSL_RAW_CHECK(list_ == &global_list_, "ODR violation in Cord"); +#endif + } + + // Non-inlined implementation of `MaybeTrackCord`, which is executed if + // either `src` is sampled or `cord` is sampled, and either untracks or + // tracks `cord` as documented per `MaybeTrackCord`. + static void MaybeTrackCordImpl(InlineData& cord, const InlineData& src, MethodIdentifier method); + + ABSL_CONST_INIT static List global_list_; + List* const list_ = &global_list_; + + // ci_prev_ and ci_next_ require the global list mutex to be held. + // Unfortunately we can't use thread annotations such that the thread safety + // analysis understands that list_ and global_list_ are one and the same. 
+ std::atomic ci_prev_{nullptr}; + std::atomic ci_next_{nullptr}; + + mutable absl::Mutex mutex_; + CordRep* rep_ ABSL_GUARDED_BY(mutex_); + + void* stack_[kMaxStackDepth]; + void* parent_stack_[kMaxStackDepth]; + const size_t stack_depth_; + const size_t parent_stack_depth_; + const MethodIdentifier method_; + const MethodIdentifier parent_method_; + CordzUpdateTracker update_tracker_; + const absl::Time create_time_; + }; + + inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord( + InlineData& cord, MethodIdentifier method + ) + { + if (ABSL_PREDICT_FALSE(cordz_should_profile())) + { + TrackCord(cord, method); + } + } + + inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord( + InlineData& cord, const InlineData& src, MethodIdentifier method + ) + { + if (ABSL_PREDICT_FALSE(InlineData::is_either_profiled(cord, src))) + { + MaybeTrackCordImpl(cord, src, method); + } + } + + inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeUntrackCord( + CordzInfo* info + ) + { + if (ABSL_PREDICT_FALSE(info)) + { + info->Untrack(); + } + } + + inline void CordzInfo::AssertHeld() ABSL_ASSERT_EXCLUSIVE_LOCK(mutex_) + { +#ifndef NDEBUG + mutex_.AssertHeld(); +#endif + } + + inline void CordzInfo::SetCordRep(CordRep* rep) + { + AssertHeld(); + rep_ = rep; + } + + inline void CordzInfo::UnsafeSetCordRep(CordRep* rep) + { + rep_ = rep; + } + + inline CordRep* CordzInfo::RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_) + { + MutexLock lock(&mutex_); + return rep_ ? CordRep::Ref(rep_) : nullptr; + } + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORDZ_INFO_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cordz_sample_token.h b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_sample_token.h new file mode 100644 index 00000000..24438985 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_sample_token.h @@ -0,0 +1,107 @@ +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/config.h" +#include "absl/strings/internal/cordz_handle.h" +#include "absl/strings/internal/cordz_info.h" + +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_ + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // The existence of a CordzSampleToken guarantees that a reader can traverse the + // global_cordz_infos_head linked-list without needing to hold a mutex. When a + // CordzSampleToken exists, all CordzInfo objects that would be destroyed are + // instead appended to a deletion queue. When the CordzSampleToken is destroyed, + // it will also clean up any of these CordzInfo objects. + // + // E.g., ST are CordzSampleToken objects and CH are CordzHandle objects. + // ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- global_delete_queue_tail + // + // This list tracks that CH1 and CH2 were created after ST1, so the thread + // holding ST1 might have a reference to CH1, CH2, ST2, and CH3. However, ST2 + // was created later, so the thread holding the ST2 token cannot have a + // reference to ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will + // delete ST1, CH1, and CH2. If instead ST2 is cleaned up first, that thread + // will only delete ST2. 
+ // + // If ST1 is cleaned up first, the new list will be: + // ST2 <- CH3 <- global_delete_queue_tail + // + // If ST2 is cleaned up first, the new list will be: + // ST1 <- CH1 <- CH2 <- CH3 <- global_delete_queue_tail + // + // All new CordzHandle objects are appended to the list, so if a new thread + // comes along before either ST1 or ST2 are cleaned up, the new list will be: + // ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- ST3 <- global_delete_queue_tail + // + // A thread must hold the global_delete_queue_mu mutex whenever it's altering + // this list. + // + // It is safe for thread that holds a CordzSampleToken to read + // global_cordz_infos at any time since the objects it is able to retrieve will + // not be deleted while the CordzSampleToken exists. + class CordzSampleToken : public CordzSnapshot + { + public: + class Iterator + { + public: + using iterator_category = std::input_iterator_tag; + using value_type = const CordzInfo&; + using difference_type = ptrdiff_t; + using pointer = const CordzInfo*; + using reference = value_type; + + Iterator() = default; + + Iterator& operator++(); + Iterator operator++(int); + friend bool operator==(const Iterator& lhs, const Iterator& rhs); + friend bool operator!=(const Iterator& lhs, const Iterator& rhs); + reference operator*() const; + pointer operator->() const; + + private: + friend class CordzSampleToken; + explicit Iterator(const CordzSampleToken* token); + + const CordzSampleToken* token_ = nullptr; + pointer current_ = nullptr; + }; + + CordzSampleToken() = default; + CordzSampleToken(const CordzSampleToken&) = delete; + CordzSampleToken& operator=(const CordzSampleToken&) = delete; + + Iterator begin() + { + return Iterator(this); + } + Iterator end() + { + return Iterator(); + } + }; + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORDZ_SAMPLE_TOKEN_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cordz_statistics.h 
b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_statistics.h new file mode 100644 index 00000000..3c1e3607 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_statistics.h @@ -0,0 +1,92 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_ + +#include + +#include "absl/base/config.h" +#include "absl/strings/internal/cordz_update_tracker.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordzStatistics captures some meta information about a Cord's shape. + struct CordzStatistics + { + using MethodIdentifier = CordzUpdateTracker::MethodIdentifier; + + // Node counts information + struct NodeCounts + { + size_t flat = 0; // #flats + size_t flat_64 = 0; // #flats up to 64 bytes + size_t flat_128 = 0; // #flats up to 128 bytes + size_t flat_256 = 0; // #flats up to 256 bytes + size_t flat_512 = 0; // #flats up to 512 bytes + size_t flat_1k = 0; // #flats up to 1K bytes + size_t external = 0; // #external reps + size_t substring = 0; // #substring reps + size_t concat = 0; // #concat reps + size_t ring = 0; // #ring buffer reps + size_t btree = 0; // #btree reps + size_t crc = 0; // #crc reps + }; + + // The size of the cord in bytes. This matches the result of Cord::size(). + size_t size = 0; + + // The estimated memory used by the sampled cord. 
This value matches the + // value as reported by Cord::EstimatedMemoryUsage(). + // A value of 0 implies the property has not been recorded. + size_t estimated_memory_usage = 0; + + // The effective memory used by the sampled cord, inversely weighted by the + // effective indegree of each allocated node. This is a representation of the + // fair share of memory usage that should be attributed to the sampled cord. + // This value is more useful for cases where one or more nodes are referenced + // by multiple Cord instances, and for cases where a Cord includes the same + // node multiple times (either directly or indirectly). + // A value of 0 implies the property has not been recorded. + size_t estimated_fair_share_memory_usage = 0; + + // The total number of nodes referenced by this cord. + // For ring buffer Cords, this includes the 'ring buffer' node. + // For btree Cords, this includes all 'CordRepBtree' tree nodes as well as all + // the substring, flat and external nodes referenced by the tree. + // A value of 0 implies the property has not been recorded. + size_t node_count = 0; + + // Detailed node counts per type + NodeCounts node_counts; + + // The cord method responsible for sampling the cord. + MethodIdentifier method = MethodIdentifier::kUnknown; + + // The cord method responsible for sampling the parent cord if applicable. + MethodIdentifier parent_method = MethodIdentifier::kUnknown; + + // Update tracker tracking invocation count per cord method. 
+ CordzUpdateTracker update_tracker; + }; + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORDZ_STATISTICS_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_scope.h b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_scope.h new file mode 100644 index 00000000..3fa306a8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_scope.h @@ -0,0 +1,83 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_ + +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/base/thread_annotations.h" +#include "absl/strings/internal/cord_internal.h" +#include "absl/strings/internal/cordz_info.h" +#include "absl/strings/internal/cordz_update_tracker.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordzUpdateScope scopes an update to the provided CordzInfo. + // The class invokes `info->Lock(method)` and `info->Unlock()` to guard + // cordrep updates. This class does nothing if `info` is null. + // See also the 'Lock`, `Unlock` and `SetCordRep` methods in `CordzInfo`. 
+ class ABSL_SCOPED_LOCKABLE CordzUpdateScope + { + public: + CordzUpdateScope(CordzInfo* info, CordzUpdateTracker::MethodIdentifier method) + ABSL_EXCLUSIVE_LOCK_FUNCTION(info) : + info_(info) + { + if (ABSL_PREDICT_FALSE(info_)) + { + info->Lock(method); + } + } + + // CordzUpdateScope can not be copied or assigned to. + CordzUpdateScope(CordzUpdateScope&& rhs) = delete; + CordzUpdateScope(const CordzUpdateScope&) = delete; + CordzUpdateScope& operator=(CordzUpdateScope&& rhs) = delete; + CordzUpdateScope& operator=(const CordzUpdateScope&) = delete; + + ~CordzUpdateScope() ABSL_UNLOCK_FUNCTION() + { + if (ABSL_PREDICT_FALSE(info_)) + { + info_->Unlock(); + } + } + + void SetCordRep(CordRep* rep) const + { + if (ABSL_PREDICT_FALSE(info_)) + { + info_->SetCordRep(rep); + } + } + + CordzInfo* info() const + { + return info_; + } + + private: + CordzInfo* info_; + }; + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_SCOPE_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_tracker.h b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_tracker.h new file mode 100644 index 00000000..453220ed --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_tracker.h @@ -0,0 +1,142 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_ +#define ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_ + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace cord_internal + { + + // CordzUpdateTracker tracks counters for Cord update methods. + // + // The purpose of CordzUpdateTracker is to track the number of calls to methods + // updating Cord data for sampled cords. The class internally uses 'lossy' + // atomic operations: Cord is thread-compatible, so there is no need to + // synchronize updates. However, Cordz collection threads may call 'Value()' at + // any point, so the class needs to provide thread safe access. + // + // This class is thread-safe. But as per above comments, all non-const methods + // should be used single-threaded only: updates are thread-safe but lossy. + class CordzUpdateTracker + { + public: + // Tracked update methods. + enum MethodIdentifier + { + kUnknown, + kAppendCord, + kAppendCordBuffer, + kAppendExternalMemory, + kAppendString, + kAssignCord, + kAssignString, + kClear, + kConstructorCord, + kConstructorString, + kCordReader, + kFlatten, + kGetAppendBuffer, + kGetAppendRegion, + kMakeCordFromExternal, + kMoveAppendCord, + kMoveAssignCord, + kMovePrependCord, + kPrependCord, + kPrependCordBuffer, + kPrependString, + kRemovePrefix, + kRemoveSuffix, + kSetExpectedChecksum, + kSubCord, + + // kNumMethods defines the number of entries: must be the last entry. + kNumMethods, + }; + + // Constructs a new instance. All counters are zero-initialized. + constexpr CordzUpdateTracker() noexcept : + values_{} + { + } + + // Copy constructs a new instance. + CordzUpdateTracker(const CordzUpdateTracker& rhs) noexcept + { + *this = rhs; + } + + // Assigns the provided value to this instance. 
+ CordzUpdateTracker& operator=(const CordzUpdateTracker& rhs) noexcept + { + for (int i = 0; i < kNumMethods; ++i) + { + values_[i].store(rhs.values_[i].load(std::memory_order_relaxed), std::memory_order_relaxed); + } + return *this; + } + + // Returns the value for the specified method. + int64_t Value(MethodIdentifier method) const + { + return values_[method].load(std::memory_order_relaxed); + } + + // Increases the value for the specified method by `n` + void LossyAdd(MethodIdentifier method, int64_t n = 1) + { + auto& value = values_[method]; + value.store(value.load(std::memory_order_relaxed) + n, std::memory_order_relaxed); + } + + // Adds all the values from `src` to this instance + void LossyAdd(const CordzUpdateTracker& src) + { + for (int i = 0; i < kNumMethods; ++i) + { + MethodIdentifier method = static_cast(i); + if (int64_t value = src.Value(method)) + { + LossyAdd(method, value); + } + } + } + + private: + // Until C++20 std::atomic is not constexpr default-constructible, so we need + // a wrapper for this class to be constexpr constructible. + class Counter : public std::atomic + { + public: + constexpr Counter() noexcept : + std::atomic(0) + { + } + }; + + Counter values_[kNumMethods]; + }; + + } // namespace cord_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_CORDZ_UPDATE_TRACKER_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/damerau_levenshtein_distance.h b/CAPI/cpp/grpc/include/absl/strings/internal/damerau_levenshtein_distance.h new file mode 100644 index 00000000..44463af9 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/damerau_levenshtein_distance.h @@ -0,0 +1,35 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_ +#define ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_ + +#include + +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + // Calculate DamerauLevenshtein distance between two strings. + // When the distance is larger than cutoff, the code just returns cutoff + 1. + uint8_t CappedDamerauLevenshteinDistance(absl::string_view s1, absl::string_view s2, uint8_t cutoff); + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_DAMERAU_LEVENSHTEIN_DISTANCE_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/escaping.h b/CAPI/cpp/grpc/include/absl/strings/internal/escaping.h new file mode 100644 index 00000000..9af21700 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/escaping.h @@ -0,0 +1,59 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_ESCAPING_H_ +#define ABSL_STRINGS_INTERNAL_ESCAPING_H_ + +#include + +#include "absl/strings/internal/resize_uninitialized.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + ABSL_CONST_INIT extern const char kBase64Chars[]; + ABSL_CONST_INIT extern const char kWebSafeBase64Chars[]; + + // Calculates the length of a Base64 encoding (RFC 4648) of a string of length + // `input_len`, with or without padding per `do_padding`. Note that 'web-safe' + // encoding (section 5 of the RFC) does not change this length. + size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding); + + // Base64-encodes `src` using the alphabet provided in `base64` (which + // determines whether to do web-safe encoding or not) and writes the result to + // `dest`. If `do_padding` is true, `dest` is padded with '=' chars until its + // length is a multiple of 3. Returns the length of `dest`. + size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest, size_t szdest, const char* base64, bool do_padding); + template + void Base64EscapeInternal(const unsigned char* src, size_t szsrc, String* dest, bool do_padding, const char* base64_chars) + { + const size_t calc_escaped_size = + CalculateBase64EscapedLenInternal(szsrc, do_padding); + STLStringResizeUninitialized(dest, calc_escaped_size); + + const size_t escaped_len = Base64EscapeInternal( + src, szsrc, &(*dest)[0], dest->size(), base64_chars, do_padding + ); + assert(calc_escaped_size == escaped_len); + dest->erase(escaped_len); + } + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_ESCAPING_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/escaping_test_common.h b/CAPI/cpp/grpc/include/absl/strings/internal/escaping_test_common.h new file mode 100644 index 00000000..48be2596 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/escaping_test_common.h @@ -0,0 
+1,140 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This test contains common things needed by both escaping_test.cc and +// escaping_benchmark.cc. + +#ifndef ABSL_STRINGS_INTERNAL_ESCAPING_TEST_COMMON_H_ +#define ABSL_STRINGS_INTERNAL_ESCAPING_TEST_COMMON_H_ + +#include +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + struct base64_testcase + { + absl::string_view plaintext; + absl::string_view cyphertext; + }; + + inline const std::array& base64_strings() + { + static const std::array testcase{{ + // Some google quotes + // Cyphertext created with "uuencode (GNU sharutils) 4.6.3" + // (Note that we're testing the websafe encoding, though, so if + // you add messages, be sure to run "tr -- '+/' '-_'" on the output) + {"I was always good at math and science, and I never realized " + "that was unusual or somehow undesirable. So one of the things " + "I care a lot about is helping to remove that stigma, " + "to show girls that you can be feminine, you can like the things " + "that girls like, but you can also be really good at technology. " + "You can be really good at building things." 
+ " - Marissa Meyer, Newsweek, 2010-12-22" + "\n", + + "SSB3YXMgYWx3YXlzIGdvb2QgYXQgbWF0aCBhbmQgc2NpZW5jZSwgYW5kIEkg" + "bmV2ZXIgcmVhbGl6ZWQgdGhhdCB3YXMgdW51c3VhbCBvciBzb21laG93IHVu" + "ZGVzaXJhYmxlLiBTbyBvbmUgb2YgdGhlIHRoaW5ncyBJIGNhcmUgYSBsb3Qg" + "YWJvdXQgaXMgaGVscGluZyB0byByZW1vdmUgdGhhdCBzdGlnbWEsIHRvIHNo" + "b3cgZ2lybHMgdGhhdCB5b3UgY2FuIGJlIGZlbWluaW5lLCB5b3UgY2FuIGxp" + "a2UgdGhlIHRoaW5ncyB0aGF0IGdpcmxzIGxpa2UsIGJ1dCB5b3UgY2FuIGFs" + "c28gYmUgcmVhbGx5IGdvb2QgYXQgdGVjaG5vbG9neS4gWW91IGNhbiBiZSBy" + "ZWFsbHkgZ29vZCBhdCBidWlsZGluZyB0aGluZ3MuIC0gTWFyaXNzYSBNZXll" + "ciwgTmV3c3dlZWssIDIwMTAtMTItMjIK"}, + + {"Typical first year for a new cluster: " + "~0.5 overheating " + "~1 PDU failure " + "~1 rack-move " + "~1 network rewiring " + "~20 rack failures " + "~5 racks go wonky " + "~8 network maintenances " + "~12 router reloads " + "~3 router failures " + "~dozens of minor 30-second blips for dns " + "~1000 individual machine failures " + "~thousands of hard drive failures " + "slow disks, bad memory, misconfigured machines, flaky machines, etc." + " - Jeff Dean, The Joys of Real Hardware" + "\n", + + "VHlwaWNhbCBmaXJzdCB5ZWFyIGZvciBhIG5ldyBjbHVzdGVyOiB-MC41IG92" + "ZXJoZWF0aW5nIH4xIFBEVSBmYWlsdXJlIH4xIHJhY2stbW92ZSB-MSBuZXR3" + "b3JrIHJld2lyaW5nIH4yMCByYWNrIGZhaWx1cmVzIH41IHJhY2tzIGdvIHdv" + "bmt5IH44IG5ldHdvcmsgbWFpbnRlbmFuY2VzIH4xMiByb3V0ZXIgcmVsb2Fk" + "cyB-MyByb3V0ZXIgZmFpbHVyZXMgfmRvemVucyBvZiBtaW5vciAzMC1zZWNv" + "bmQgYmxpcHMgZm9yIGRucyB-MTAwMCBpbmRpdmlkdWFsIG1hY2hpbmUgZmFp" + "bHVyZXMgfnRob3VzYW5kcyBvZiBoYXJkIGRyaXZlIGZhaWx1cmVzIHNsb3cg" + "ZGlza3MsIGJhZCBtZW1vcnksIG1pc2NvbmZpZ3VyZWQgbWFjaGluZXMsIGZs" + "YWt5IG1hY2hpbmVzLCBldGMuIC0gSmVmZiBEZWFuLCBUaGUgSm95cyBvZiBS" + "ZWFsIEhhcmR3YXJlCg"}, + + {"I'm the head of the webspam team at Google. " + "That means that if you type your name into Google and get porn back, " + "it's my fault. Unless you're a porn star, in which case porn is a " + "completely reasonable response." 
+ " - Matt Cutts, Google Plus" + "\n", + + "SSdtIHRoZSBoZWFkIG9mIHRoZSB3ZWJzcGFtIHRlYW0gYXQgR29vZ2xlLiAg" + "VGhhdCBtZWFucyB0aGF0IGlmIHlvdSB0eXBlIHlvdXIgbmFtZSBpbnRvIEdv" + "b2dsZSBhbmQgZ2V0IHBvcm4gYmFjaywgaXQncyBteSBmYXVsdC4gVW5sZXNz" + "IHlvdSdyZSBhIHBvcm4gc3RhciwgaW4gd2hpY2ggY2FzZSBwb3JuIGlzIGEg" + "Y29tcGxldGVseSByZWFzb25hYmxlIHJlc3BvbnNlLiAtIE1hdHQgQ3V0dHMs" + "IEdvb2dsZSBQbHVzCg"}, + + {"It will still be a long time before machines approach human " + "intelligence. " + "But luckily, machines don't actually have to be intelligent; " + "they just have to fake it. Access to a wealth of information, " + "combined with a rudimentary decision-making capacity, " + "can often be almost as useful. Of course, the results are better yet " + "when coupled with intelligence. A reference librarian with access to " + "a good search engine is a formidable tool." + " - Craig Silverstein, Siemens Pictures of the Future, Spring 2004" + "\n", + + "SXQgd2lsbCBzdGlsbCBiZSBhIGxvbmcgdGltZSBiZWZvcmUgbWFjaGluZXMg" + "YXBwcm9hY2ggaHVtYW4gaW50ZWxsaWdlbmNlLiBCdXQgbHVja2lseSwgbWFj" + "aGluZXMgZG9uJ3QgYWN0dWFsbHkgaGF2ZSB0byBiZSBpbnRlbGxpZ2VudDsg" + "dGhleSBqdXN0IGhhdmUgdG8gZmFrZSBpdC4gQWNjZXNzIHRvIGEgd2VhbHRo" + "IG9mIGluZm9ybWF0aW9uLCBjb21iaW5lZCB3aXRoIGEgcnVkaW1lbnRhcnkg" + "ZGVjaXNpb24tbWFraW5nIGNhcGFjaXR5LCBjYW4gb2Z0ZW4gYmUgYWxtb3N0" + "IGFzIHVzZWZ1bC4gT2YgY291cnNlLCB0aGUgcmVzdWx0cyBhcmUgYmV0dGVy" + "IHlldCB3aGVuIGNvdXBsZWQgd2l0aCBpbnRlbGxpZ2VuY2UuIEEgcmVmZXJl" + "bmNlIGxpYnJhcmlhbiB3aXRoIGFjY2VzcyB0byBhIGdvb2Qgc2VhcmNoIGVu" + "Z2luZSBpcyBhIGZvcm1pZGFibGUgdG9vbC4gLSBDcmFpZyBTaWx2ZXJzdGVp" + "biwgU2llbWVucyBQaWN0dXJlcyBvZiB0aGUgRnV0dXJlLCBTcHJpbmcgMjAw" + "NAo"}, + + // Degenerate edge case + {"", + ""}, + }}; + + return testcase; + } + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_ESCAPING_TEST_COMMON_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/has_absl_stringify.h 
b/CAPI/cpp/grpc/include/absl/strings/internal/has_absl_stringify.h new file mode 100644 index 00000000..70fe0705 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/has_absl_stringify.h @@ -0,0 +1,64 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_ +#define ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_ +#include +#include +#include + +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace strings_internal + { + + // This is an empty class not intended to be used. It exists so that + // `HasAbslStringify` can reference a universal class rather than needing to be + // copied for each new sink. + class UnimplementedSink + { + public: + void Append(size_t count, char ch); + + void Append(string_view v); + + // Support `absl::Format(&sink, format, args...)`. 
+ friend void AbslFormatFlush(UnimplementedSink* sink, absl::string_view v); + }; + + template + struct HasAbslStringify : std::false_type + { + }; + + template + struct HasAbslStringify< + T, + std::enable_if_t(), + std::declval() + ))>::value>> : std::true_type + { + }; + + } // namespace strings_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_HAS_ABSL_STRINGIFY_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/memutil.h b/CAPI/cpp/grpc/include/absl/strings/internal/memutil.h new file mode 100644 index 00000000..3582c7b3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/memutil.h @@ -0,0 +1,42 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_STRINGS_INTERNAL_MEMUTIL_H_ +#define ABSL_STRINGS_INTERNAL_MEMUTIL_H_ + +#include +#include + +#include "absl/base/port.h" // disable some warnings on Windows +#include "absl/strings/ascii.h" // for absl::ascii_tolower + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // Performs a byte-by-byte comparison of `len` bytes of the strings `s1` and + // `s2`, ignoring the case of the characters. It returns an integer less than, + // equal to, or greater than zero if `s1` is found, respectively, to be less + // than, to match, or be greater than `s2`. 
+ int memcasecmp(const char* s1, const char* s2, size_t len); + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_MEMUTIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/numbers_test_common.h b/CAPI/cpp/grpc/include/absl/strings/internal/numbers_test_common.h new file mode 100644 index 00000000..94b48fed --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/numbers_test_common.h @@ -0,0 +1,189 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file contains common things needed by numbers_test.cc, +// numbers_legacy_test.cc and numbers_benchmark.cc. + +#ifndef ABSL_STRINGS_INTERNAL_NUMBERS_TEST_COMMON_H_ +#define ABSL_STRINGS_INTERNAL_NUMBERS_TEST_COMMON_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + template + inline bool Itoa(IntType value, int base, std::string* destination) + { + destination->clear(); + if (base <= 1 || base > 36) + { + return false; + } + + if (value == 0) + { + destination->push_back('0'); + return true; + } + + bool negative = value < 0; + while (value != 0) + { + const IntType next_value = value / base; + // Can't use std::abs here because of problems when IntType is unsigned. + int remainder = + static_cast(value > next_value * base ? 
value - next_value * base : next_value * base - value); + char c = remainder < 10 ? '0' + remainder : 'A' + remainder - 10; + destination->insert(0, 1, c); + value = next_value; + } + + if (negative) + { + destination->insert(0, 1, '-'); + } + return true; + } + + struct uint32_test_case + { + const char* str; + bool expect_ok; + int base; // base to pass to the conversion function + uint32_t expected; + }; + + inline const std::array& strtouint32_test_cases() + { + static const std::array test_cases{{ + {"0xffffffff", true, 16, (std::numeric_limits::max)()}, + {"0x34234324", true, 16, 0x34234324}, + {"34234324", true, 16, 0x34234324}, + {"0", true, 16, 0}, + {" \t\n 0xffffffff", true, 16, (std::numeric_limits::max)()}, + {" \f\v 46", true, 10, 46}, // must accept weird whitespace + {" \t\n 72717222", true, 8, 072717222}, + {" \t\n 072717222", true, 8, 072717222}, + {" \t\n 072717228", false, 8, 07271722}, + {"0", true, 0, 0}, + + // Base-10 version. + {"34234324", true, 0, 34234324}, + {"4294967295", true, 0, (std::numeric_limits::max)()}, + {"34234324 \n\t", true, 10, 34234324}, + + // Unusual base + {"0", true, 3, 0}, + {"2", true, 3, 2}, + {"11", true, 3, 4}, + + // Invalid uints. + {"", false, 0, 0}, + {" ", false, 0, 0}, + {"abc", false, 0, 0}, // would be valid hex, but prefix is missing + {"34234324a", false, 0, 34234324}, + {"34234.3", false, 0, 34234}, + {"-1", false, 0, 0}, + {" -123", false, 0, 0}, + {" \t\n -123", false, 0, 0}, + + // Out of bounds. 
+ {"4294967296", false, 0, (std::numeric_limits::max)()}, + {"0x100000000", false, 0, (std::numeric_limits::max)()}, + {nullptr, false, 0, 0}, + }}; + return test_cases; + } + + struct uint64_test_case + { + const char* str; + bool expect_ok; + int base; + uint64_t expected; + }; + + inline const std::array& strtouint64_test_cases() + { + static const std::array test_cases{{ + {"0x3423432448783446", true, 16, int64_t{0x3423432448783446}}, + {"3423432448783446", true, 16, int64_t{0x3423432448783446}}, + + {"0", true, 16, 0}, + {"000", true, 0, 0}, + {"0", true, 0, 0}, + {" \t\n 0xffffffffffffffff", true, 16, (std::numeric_limits::max)()}, + + {"012345670123456701234", true, 8, int64_t{012345670123456701234}}, + {"12345670123456701234", true, 8, int64_t{012345670123456701234}}, + + {"12845670123456701234", false, 8, 0}, + + // Base-10 version. + {"34234324487834466", true, 0, int64_t{34234324487834466}}, + + {" \t\n 18446744073709551615", true, 0, (std::numeric_limits::max)()}, + + {"34234324487834466 \n\t ", true, 0, int64_t{34234324487834466}}, + + {" \f\v 46", true, 10, 46}, // must accept weird whitespace + + // Unusual base + {"0", true, 3, 0}, + {"2", true, 3, 2}, + {"11", true, 3, 4}, + + {"0", true, 0, 0}, + + // Invalid uints. + {"", false, 0, 0}, + {" ", false, 0, 0}, + {"abc", false, 0, 0}, + {"34234324487834466a", false, 0, 0}, + {"34234487834466.3", false, 0, 0}, + {"-1", false, 0, 0}, + {" -123", false, 0, 0}, + {" \t\n -123", false, 0, 0}, + + // Out of bounds. + {"18446744073709551616", false, 10, 0}, + {"18446744073709551616", false, 0, 0}, + {"0x10000000000000000", false, 16, (std::numeric_limits::max)()}, + {"0X10000000000000000", false, 16, (std::numeric_limits::max)()}, // 0X versus 0x. + {"0x10000000000000000", false, 0, (std::numeric_limits::max)()}, + {"0X10000000000000000", false, 0, (std::numeric_limits::max)()}, // 0X versus 0x. + + {"0x1234", true, 16, 0x1234}, + + // Base-10 string version. 
+ {"1234", true, 0, 1234}, + {nullptr, false, 0, 0}, + }}; + return test_cases; + } + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_NUMBERS_TEST_COMMON_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/ostringstream.h b/CAPI/cpp/grpc/include/absl/strings/internal/ostringstream.h new file mode 100644 index 00000000..56afb38d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/ostringstream.h @@ -0,0 +1,144 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_ +#define ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // The same as std::ostringstream but appends to a user-specified std::string, + // and is faster. It is ~70% faster to create, ~50% faster to write to, and + // completely free to extract the result std::string. + // + // std::string s; + // OStringStream strm(&s); + // strm << 42 << ' ' << 3.14; // appends to `s` + // + // The stream object doesn't have to be named. Starting from C++11 operator<< + // works with rvalues of std::ostream. 
+ // + // std::string s; + // OStringStream(&s) << 42 << ' ' << 3.14; // appends to `s` + // + // OStringStream is faster to create than std::ostringstream but it's still + // relatively slow. Avoid creating multiple streams where a single stream will + // do. + // + // Creates unnecessary instances of OStringStream: slow. + // + // std::string s; + // OStringStream(&s) << 42; + // OStringStream(&s) << ' '; + // OStringStream(&s) << 3.14; + // + // Creates a single instance of OStringStream and reuses it: fast. + // + // std::string s; + // OStringStream strm(&s); + // strm << 42; + // strm << ' '; + // strm << 3.14; + // + // Note: flush() has no effect. No reason to call it. + class OStringStream final : public std::ostream + { + public: + // The argument can be null, in which case you'll need to call str(p) with a + // non-null argument before you can write to the stream. + // + // The destructor of OStringStream doesn't use the std::string. It's OK to + // destroy the std::string before the stream. 
+ explicit OStringStream(std::string* str) : + std::ostream(&buf_), + buf_(str) + { + } + OStringStream(OStringStream&& that) : + std::ostream(std::move(static_cast(that))), + buf_(that.buf_) + { + rdbuf(&buf_); + } + OStringStream& operator=(OStringStream&& that) + { + std::ostream::operator=(std::move(static_cast(that))); + buf_ = that.buf_; + rdbuf(&buf_); + return *this; + } + + std::string* str() + { + return buf_.str(); + } + const std::string* str() const + { + return buf_.str(); + } + void str(std::string* str) + { + buf_.str(str); + } + + private: + class Streambuf final : public std::streambuf + { + public: + explicit Streambuf(std::string* str) : + str_(str) + { + } + Streambuf(const Streambuf&) = default; + Streambuf& operator=(const Streambuf&) = default; + + std::string* str() + { + return str_; + } + const std::string* str() const + { + return str_; + } + void str(std::string* str) + { + str_ = str; + } + + protected: + int_type overflow(int c) override; + std::streamsize xsputn(const char* s, std::streamsize n) override; + + private: + std::string* str_; + } buf_; + }; + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_OSTRINGSTREAM_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/pow10_helper.h b/CAPI/cpp/grpc/include/absl/strings/internal/pow10_helper.h new file mode 100644 index 00000000..345c4604 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/pow10_helper.h @@ -0,0 +1,42 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This test helper library contains a table of powers of 10, to guarantee +// precise values are computed across the full range of doubles. We can't rely +// on the pow() function, because not all standard libraries ship a version +// that is precise. +#ifndef ABSL_STRINGS_INTERNAL_POW10_HELPER_H_ +#define ABSL_STRINGS_INTERNAL_POW10_HELPER_H_ + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // Computes the precise value of 10^exp. (I.e. the nearest representable + // double to the exact value, rounding to nearest-even in the (single) case of + // being exactly halfway between.) + double Pow10(int exp); + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_POW10_HELPER_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/resize_uninitialized.h b/CAPI/cpp/grpc/include/absl/strings/internal/resize_uninitialized.h new file mode 100644 index 00000000..642b50a7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/resize_uninitialized.h @@ -0,0 +1,141 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_ +#define ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_ + +#include +#include +#include +#include + +#include "absl/base/port.h" +#include "absl/meta/type_traits.h" // for void_t + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // In this type trait, we look for a __resize_default_init member function, and + // we use it if available, otherwise, we use resize. We provide HasMember to + // indicate whether __resize_default_init is present. + template + struct ResizeUninitializedTraits + { + using HasMember = std::false_type; + static void Resize(string_type* s, size_t new_size) + { + s->resize(new_size); + } + }; + + // __resize_default_init is provided by libc++ >= 8.0 + template + struct ResizeUninitializedTraits< + string_type, + absl::void_t() + .__resize_default_init(237))>> + { + using HasMember = std::true_type; + static void Resize(string_type* s, size_t new_size) + { + s->__resize_default_init(new_size); + } + }; + + // Returns true if the std::string implementation supports a resize where + // the new characters added to the std::string are left untouched. + // + // (A better name might be "STLStringSupportsUninitializedResize", alluding to + // the previous function.) 
+ template + inline constexpr bool STLStringSupportsNontrashingResize(string_type*) + { + return ResizeUninitializedTraits::HasMember::value; + } + + // Like str->resize(new_size), except any new characters added to "*str" as a + // result of resizing may be left uninitialized, rather than being filled with + // '0' bytes. Typically used when code is then going to overwrite the backing + // store of the std::string with known data. + template + inline void STLStringResizeUninitialized(string_type* s, size_t new_size) + { + ResizeUninitializedTraits::Resize(s, new_size); + } + + // Used to ensure exponential growth so that the amortized complexity of + // increasing the string size by a small amount is O(1), in contrast to + // O(str->size()) in the case of precise growth. + template + void STLStringReserveAmortized(string_type* s, size_t new_size) + { + const size_t cap = s->capacity(); + if (new_size > cap) + { + // Make sure to always grow by at least a factor of 2x. + s->reserve((std::max)(new_size, 2 * cap)); + } + } + + // In this type trait, we look for an __append_default_init member function, and + // we use it if available, otherwise, we use append. + template + struct AppendUninitializedTraits + { + static void Append(string_type* s, size_t n) + { + s->append(n, typename string_type::value_type()); + } + }; + + template + struct AppendUninitializedTraits< + string_type, + absl::void_t() + .__append_default_init(237))>> + { + static void Append(string_type* s, size_t n) + { + s->__append_default_init(n); + } + }; + + // Like STLStringResizeUninitialized(str, new_size), except guaranteed to use + // exponential growth so that the amortized complexity of increasing the string + // size by a small amount is O(1), in contrast to O(str->size()) in the case of + // precise growth. 
+ template + void STLStringResizeUninitializedAmortized(string_type* s, size_t new_size) + { + const size_t size = s->size(); + if (new_size > size) + { + AppendUninitializedTraits::Append(s, new_size - size); + } + else + { + s->erase(new_size); + } + } + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_RESIZE_UNINITIALIZED_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/stl_type_traits.h b/CAPI/cpp/grpc/include/absl/strings/internal/stl_type_traits.h new file mode 100644 index 00000000..9536f1ce --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/stl_type_traits.h @@ -0,0 +1,239 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// The file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type +// trait metafunction to assist in working with the _GLIBCXX_DEBUG debug +// wrappers of STL containers. +// +// DO NOT INCLUDE THIS FILE DIRECTLY. Use this file by including +// absl/strings/str_split.h. 
+// +// IWYU pragma: private, include "absl/strings/str_split.h" + +#ifndef ABSL_STRINGS_INTERNAL_STL_TYPE_TRAITS_H_ +#define ABSL_STRINGS_INTERNAL_STL_TYPE_TRAITS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + template class T> + struct IsSpecializationImpl : std::false_type + { + }; + template class T, typename... Args> + struct IsSpecializationImpl, T> : std::true_type + { + }; + template class T> + using IsSpecialization = IsSpecializationImpl, T>; + + template + struct IsArrayImpl : std::false_type + { + }; + template class A, typename T, size_t N> + struct IsArrayImpl> : std::is_same, std::array> + { + }; + template + using IsArray = IsArrayImpl>; + + template + struct IsBitsetImpl : std::false_type + { + }; + template class B, size_t N> + struct IsBitsetImpl> : std::is_same, std::bitset> + { + }; + template + using IsBitset = IsBitsetImpl>; + + template + struct IsSTLContainer : absl::disjunction, IsBitset, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization, IsSpecialization> + { + }; + + template class T, typename = void> + struct IsBaseOfSpecializationImpl : std::false_type + { + }; + // IsBaseOfSpecializationImpl needs multiple partial specializations to SFINAE + // on the existence of container dependent types and plug them into the STL + // template. 
+ template class T> + struct IsBaseOfSpecializationImpl< + C, + T, + absl::void_t> : std::is_base_of> + { + }; + template class T> + struct IsBaseOfSpecializationImpl< + C, + T, + absl::void_t> : std::is_base_of> + { + }; + template class T> + struct IsBaseOfSpecializationImpl< + C, + T, + absl::void_t> : std::is_base_of> + { + }; + template class T> + struct IsBaseOfSpecializationImpl< + C, + T, + absl::void_t> : std::is_base_of> + { + }; + template class T> + struct IsBaseOfSpecializationImpl< + C, + T, + absl::void_t> : std::is_base_of> + { + }; + template class T> + using IsBaseOfSpecialization = IsBaseOfSpecializationImpl, T>; + + template + struct IsBaseOfArrayImpl : std::false_type + { + }; + template class A, typename T, size_t N> + struct IsBaseOfArrayImpl> : std::is_base_of, std::array> + { + }; + template + using IsBaseOfArray = IsBaseOfArrayImpl>; + + template + struct IsBaseOfBitsetImpl : std::false_type + { + }; + template class B, size_t N> + struct IsBaseOfBitsetImpl> : std::is_base_of, std::bitset> + { + }; + template + using IsBaseOfBitset = IsBaseOfBitsetImpl>; + + template + struct IsBaseOfSTLContainer : absl::disjunction, IsBaseOfBitset, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization, IsBaseOfSpecialization> + { + }; + + template class T, typename = void> + struct IsConvertibleToSpecializationImpl : std::false_type + { + }; + // IsConvertibleToSpecializationImpl needs multiple partial specializations to + // SFINAE on the existence of container dependent types and plug them into the + // STL template. 
+ template class T> + struct IsConvertibleToSpecializationImpl< + C, + T, + absl::void_t> : std::is_convertible> + { + }; + template class T> + struct IsConvertibleToSpecializationImpl< + C, + T, + absl::void_t> : std::is_convertible> + { + }; + template class T> + struct IsConvertibleToSpecializationImpl< + C, + T, + absl::void_t> : std::is_convertible> + { + }; + template class T> + struct IsConvertibleToSpecializationImpl< + C, + T, + absl::void_t> : std::is_convertible> + { + }; + template class T> + struct IsConvertibleToSpecializationImpl< + C, + T, + absl::void_t> : std::is_convertible> + { + }; + template class T> + using IsConvertibleToSpecialization = + IsConvertibleToSpecializationImpl, T>; + + template + struct IsConvertibleToArrayImpl : std::false_type + { + }; + template class A, typename T, size_t N> + struct IsConvertibleToArrayImpl> : std::is_convertible, std::array> + { + }; + template + using IsConvertibleToArray = IsConvertibleToArrayImpl>; + + template + struct IsConvertibleToBitsetImpl : std::false_type + { + }; + template class B, size_t N> + struct IsConvertibleToBitsetImpl> : std::is_convertible, std::bitset> + { + }; + template + using IsConvertibleToBitset = IsConvertibleToBitsetImpl>; + + template + struct IsConvertibleToSTLContainer : absl::disjunction, IsConvertibleToBitset, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization, IsConvertibleToSpecialization> + { + }; + + template + struct IsStrictlyBaseOfAndConvertibleToSTLContainer : absl::conjunction>, IsBaseOfSTLContainer, IsConvertibleToSTLContainer> + { + }; + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl +#endif // ABSL_STRINGS_INTERNAL_STL_TYPE_TRAITS_H_ diff --git 
a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/arg.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/arg.h new file mode 100644 index 00000000..efe9568a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/arg.h @@ -0,0 +1,638 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/port.h" +#include "absl/meta/type_traits.h" +#include "absl/numeric/int128.h" +#include "absl/strings/internal/has_absl_stringify.h" +#include "absl/strings/internal/str_format/extension.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class Cord; + class FormatCountCapture; + class FormatSink; + + template + struct FormatConvertResult; + class FormatConversionSpec; + + namespace str_format_internal + { + + template + struct ArgConvertResult + { + bool value; + }; + + using IntegralConvertResult = ArgConvertResult; + using FloatingConvertResult = ArgConvertResult; + using CharConvertResult = ArgConvertResult; + + template + struct HasUserDefinedConvert : std::false_type + { + }; + + template + struct HasUserDefinedConvert(), std::declval(), std::declval()))>> : std::true_type + { + }; + + // These declarations prevent ADL lookup 
from continuing in absl namespaces, + // we are deliberately using these as ADL hooks and want them to consider + // non-absl namespaces only. + void AbslFormatConvert(); + void AbslStringify(); + + template + bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + + // Forward declarations of internal `ConvertIntArg` function template + // instantiations are here to avoid including the template body in the headers + // and instantiating it in large numbers of translation units. Explicit + // instantiations can be found in "absl/strings/internal/str_format/arg.cc" + extern template bool ConvertIntArg(char v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + extern template bool ConvertIntArg(signed char v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + extern template bool ConvertIntArg(unsigned char v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + extern template bool ConvertIntArg(short v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + extern template bool ConvertIntArg( // NOLINT + unsigned short v, + FormatConversionSpecImpl conv, // NOLINT + FormatSinkImpl* sink + ); + extern template bool ConvertIntArg(int v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + extern template bool ConvertIntArg(unsigned int v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + extern template bool ConvertIntArg( // NOLINT + long v, + FormatConversionSpecImpl conv, + FormatSinkImpl* sink + ); // NOLINT + extern template bool ConvertIntArg(unsigned long v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + extern template bool ConvertIntArg(long long v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + extern template bool ConvertIntArg( // NOLINT + unsigned long long v, + FormatConversionSpecImpl conv, // NOLINT + FormatSinkImpl* sink + ); + + template + auto FormatConvertImpl(const T& v, FormatConversionSpecImpl conv, FormatSinkImpl* sink) + -> 
decltype(AbslFormatConvert(v, std::declval(), std::declval())) + { + using FormatConversionSpecT = + absl::enable_if_t; + using FormatSinkT = + absl::enable_if_t; + auto fcs = conv.Wrap(); + auto fs = sink->Wrap(); + return AbslFormatConvert(v, fcs, &fs); + } + + template + auto FormatConvertImpl(const T& v, FormatConversionSpecImpl conv, FormatSinkImpl* sink) + -> std::enable_if_t::value && std::is_void(), v))>::value, IntegralConvertResult> + { + if (conv.conversion_char() == FormatConversionCharInternal::v) + { + using FormatSinkT = + absl::enable_if_t; + auto fs = sink->Wrap(); + AbslStringify(fs, v); + return {true}; + } + else + { + return {ConvertIntArg( + static_cast::type>(v), conv, sink + )}; + } + } + + template + auto FormatConvertImpl(const T& v, FormatConversionSpecImpl, FormatSinkImpl* sink) + -> std::enable_if_t::value && std::is_void(), v))>::value, ArgConvertResult> + { + using FormatSinkT = + absl::enable_if_t; + auto fs = sink->Wrap(); + AbslStringify(fs, v); + return {true}; + } + + template + class StreamedWrapper; + + // If 'v' can be converted (in the printf sense) according to 'conv', + // then convert it, appending to `sink` and return `true`. + // Otherwise fail and return `false`. + + // AbslFormatConvert(v, conv, sink) is intended to be found by ADL on 'v' + // as an extension mechanism. These FormatConvertImpl functions are the default + // implementations. + // The ADL search is augmented via the 'Sink*' parameter, which also + // serves as a disambiguator to reject possible unintended 'AbslFormatConvert' + // functions in the namespaces associated with 'v'. + + // Raw pointers. + struct VoidPtr + { + VoidPtr() = default; + template(std::declval())) = 0> + VoidPtr(T* ptr) // NOLINT + : + value(ptr ? 
reinterpret_cast(ptr) : 0) + { + } + uintptr_t value; + }; + + template + constexpr FormatConversionCharSet ExtractCharSet(FormatConvertResult) + { + return C; + } + + template + constexpr FormatConversionCharSet ExtractCharSet(ArgConvertResult) + { + return C; + } + + using StringConvertResult = ArgConvertResult; + ArgConvertResult FormatConvertImpl( + VoidPtr v, FormatConversionSpecImpl conv, FormatSinkImpl* sink + ); + + // Strings. + StringConvertResult FormatConvertImpl(const std::string& v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + StringConvertResult FormatConvertImpl(string_view v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); +#if defined(ABSL_HAVE_STD_STRING_VIEW) && !defined(ABSL_USES_STD_STRING_VIEW) + inline StringConvertResult FormatConvertImpl(std::string_view v, FormatConversionSpecImpl conv, FormatSinkImpl* sink) + { + return FormatConvertImpl(absl::string_view(v.data(), v.size()), conv, sink); + } +#endif // ABSL_HAVE_STD_STRING_VIEW && !ABSL_USES_STD_STRING_VIEW + + ArgConvertResult + FormatConvertImpl(const char* v, const FormatConversionSpecImpl conv, FormatSinkImpl* sink); + + template::value>::type* = nullptr> + StringConvertResult FormatConvertImpl(const AbslCord& value, FormatConversionSpecImpl conv, FormatSinkImpl* sink) + { + bool is_left = conv.has_left_flag(); + size_t space_remaining = 0; + + int width = conv.width(); + if (width >= 0) + space_remaining = static_cast(width); + + size_t to_write = value.size(); + + int precision = conv.precision(); + if (precision >= 0) + to_write = (std::min)(to_write, static_cast(precision)); + + space_remaining = Excess(to_write, space_remaining); + + if (space_remaining > 0 && !is_left) + sink->Append(space_remaining, ' '); + + for (string_view piece : value.Chunks()) + { + if (piece.size() > to_write) + { + piece.remove_suffix(piece.size() - to_write); + to_write = 0; + } + else + { + to_write -= piece.size(); + } + sink->Append(piece); + if (to_write == 0) + { + break; + } + 
} + + if (space_remaining > 0 && is_left) + sink->Append(space_remaining, ' '); + return {true}; + } + + bool ConvertBoolArg(bool v, FormatSinkImpl* sink); + + // Floats. + FloatingConvertResult FormatConvertImpl(float v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + FloatingConvertResult FormatConvertImpl(double v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + FloatingConvertResult FormatConvertImpl(long double v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + + // Chars. + CharConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + + // Ints. + IntegralConvertResult FormatConvertImpl(signed char v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(unsigned char v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(short v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(unsigned short v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(int v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(unsigned v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(long v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(unsigned long v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(long long v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(unsigned long long v, // NOLINT + FormatConversionSpecImpl conv, + FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(int128 v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + IntegralConvertResult FormatConvertImpl(uint128 v, FormatConversionSpecImpl conv, 
FormatSinkImpl* sink); + + // This function needs to be a template due to ambiguity regarding type + // conversions. + template::value, int> = 0> + IntegralConvertResult FormatConvertImpl(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink) + { + if (conv.conversion_char() == FormatConversionCharInternal::v) + { + return {ConvertBoolArg(v, sink)}; + } + + return FormatConvertImpl(static_cast(v), conv, sink); + } + + // We provide this function to help the checker, but it is never defined. + // FormatArgImpl will use the underlying Convert functions instead. + template + typename std::enable_if::value && !HasUserDefinedConvert::value && !strings_internal::HasAbslStringify::value, IntegralConvertResult>::type + FormatConvertImpl(T v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); + + template + StringConvertResult FormatConvertImpl(const StreamedWrapper& v, FormatConversionSpecImpl conv, FormatSinkImpl* out) + { + std::ostringstream oss; + oss << v.v_; + if (!oss) + return {false}; + return str_format_internal::FormatConvertImpl(oss.str(), conv, out); + } + + // Use templates and dependent types to delay evaluation of the function + // until after FormatCountCapture is fully defined. + struct FormatCountCaptureHelper + { + template + static ArgConvertResult ConvertHelper( + const FormatCountCapture& v, FormatConversionSpecImpl conv, FormatSinkImpl* sink + ) + { + const absl::enable_if_t& v2 = v; + + if (conv.conversion_char() != + str_format_internal::FormatConversionCharInternal::n) + { + return {false}; + } + *v2.p_ = static_cast(sink->size()); + return {true}; + } + }; + + template + ArgConvertResult FormatConvertImpl( + const FormatCountCapture& v, FormatConversionSpecImpl conv, FormatSinkImpl* sink + ) + { + return FormatCountCaptureHelper::ConvertHelper(v, conv, sink); + } + + // Helper friend struct to hide implementation details from the public API of + // FormatArgImpl. 
+ struct FormatArgImplFriend + { + template + static bool ToInt(Arg arg, int* out) + { + // A value initialized FormatConversionSpecImpl has a `none` conv, which + // tells the dispatcher to run the `int` conversion. + return arg.dispatcher_(arg.data_, {}, out); + } + + template + static bool Convert(Arg arg, FormatConversionSpecImpl conv, FormatSinkImpl* out) + { + return arg.dispatcher_(arg.data_, conv, out); + } + + template + static typename Arg::Dispatcher GetVTablePtrForTest(Arg arg) + { + return arg.dispatcher_; + } + }; + + template + constexpr FormatConversionCharSet ArgumentToConv() + { + using ConvResult = decltype(str_format_internal::FormatConvertImpl( + std::declval(), + std::declval(), + std::declval() + )); + return absl::str_format_internal::ExtractCharSet(ConvResult{}); + } + + // A type-erased handle to a format argument. + class FormatArgImpl + { + private: + enum + { + kInlinedSpace = 8 + }; + + using VoidPtr = str_format_internal::VoidPtr; + + union Data + { + const void* ptr; + const volatile void* volatile_ptr; + char buf[kInlinedSpace]; + }; + + using Dispatcher = bool (*)(Data, FormatConversionSpecImpl, void* out); + + template + struct store_by_value : std::integral_constant::value || std::is_floating_point::value || std::is_pointer::value || std::is_same::value)> + { + }; + + enum StoragePolicy + { + ByPointer, + ByVolatilePointer, + ByValue + }; + template + struct storage_policy : std::integral_constant::value ? ByVolatilePointer : (store_by_value::value ? ByValue : ByPointer))> + { + }; + + // To reduce the number of vtables we will decay values before hand. + // Anything with a user-defined Convert will get its own vtable. + // For everything else: + // - Decay char* and char arrays into `const char*` + // - Decay any other pointer to `const void*` + // - Decay all enums to the integral promotion of their underlying type. + // - Decay function pointers to void*. 
+ template + struct DecayType + { + static constexpr bool kHasUserDefined = + str_format_internal::HasUserDefinedConvert::value || + strings_internal::HasAbslStringify::value; + using type = typename std::conditional< + !kHasUserDefined && std::is_convertible::value, + const char*, + typename std::conditional::value, VoidPtr, const T&>::type>::type; + }; + template + struct DecayType::value && !strings_internal::HasAbslStringify::value && std::is_enum::value>::type> + { + using type = decltype(+typename std::underlying_type::type()); + }; + + public: + template + explicit FormatArgImpl(const T& value) + { + using D = typename DecayType::type; + static_assert( + std::is_same::value || storage_policy::value == ByValue, + "Decayed types must be stored by value" + ); + Init(static_cast(value)); + } + + private: + friend struct str_format_internal::FormatArgImplFriend; + template::value> + struct Manager; + + template + struct Manager + { + static Data SetValue(const T& value) + { + Data data; + data.ptr = std::addressof(value); + return data; + } + + static const T& Value(Data arg) + { + return *static_cast(arg.ptr); + } + }; + + template + struct Manager + { + static Data SetValue(const T& value) + { + Data data; + data.volatile_ptr = &value; + return data; + } + + static const T& Value(Data arg) + { + return *static_cast(arg.volatile_ptr); + } + }; + + template + struct Manager + { + static Data SetValue(const T& value) + { + Data data; + memcpy(data.buf, &value, sizeof(value)); + return data; + } + + static T Value(Data arg) + { + T value; + memcpy(&value, arg.buf, sizeof(T)); + return value; + } + }; + + template + void Init(const T& value) + { + data_ = Manager::SetValue(value); + dispatcher_ = &Dispatch; + } + + template + static int ToIntVal(const T& val) + { + using CommonType = typename std::conditional::value, int64_t, uint64_t>::type; + if (static_cast(val) > + static_cast((std::numeric_limits::max)())) + { + return (std::numeric_limits::max)(); + } + else 
if (std::is_signed::value && static_cast(val) < static_cast((std::numeric_limits::min)())) + { + return (std::numeric_limits::min)(); + } + return static_cast(val); + } + + template + static bool ToInt(Data arg, int* out, std::true_type /* is_integral */, std::false_type) + { + *out = ToIntVal(Manager::Value(arg)); + return true; + } + + template + static bool ToInt(Data arg, int* out, std::false_type, std::true_type /* is_enum */) + { + *out = ToIntVal(static_cast::type>( + Manager::Value(arg) + )); + return true; + } + + template + static bool ToInt(Data, int*, std::false_type, std::false_type) + { + return false; + } + + template + static bool Dispatch(Data arg, FormatConversionSpecImpl spec, void* out) + { + // A `none` conv indicates that we want the `int` conversion. + if (ABSL_PREDICT_FALSE(spec.conversion_char() == FormatConversionCharInternal::kNone)) + { + return ToInt(arg, static_cast(out), std::is_integral(), std::is_enum()); + } + if (ABSL_PREDICT_FALSE(!Contains(ArgumentToConv(), spec.conversion_char()))) + { + return false; + } + return str_format_internal::FormatConvertImpl( + Manager::Value(arg), spec, static_cast(out) + ) + .value; + } + + Data data_; + Dispatcher dispatcher_; + }; + +#define ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(T, E) \ + E template bool FormatArgImpl::Dispatch(Data, FormatConversionSpecImpl, void*) + +#define ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_(...) 
\ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(str_format_internal::VoidPtr, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(bool, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(char, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(signed char, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned char, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(short, __VA_ARGS__); /* NOLINT */ \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned short, /* NOLINT */ \ + __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(int, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned int, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(long, __VA_ARGS__); /* NOLINT */ \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned long, /* NOLINT */ \ + __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(long long, /* NOLINT */ \ + __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(unsigned long long, /* NOLINT */ \ + __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(int128, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(uint128, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(float, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(double, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(long double, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(const char*, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(std::string, __VA_ARGS__); \ + ABSL_INTERNAL_FORMAT_DISPATCH_INSTANTIATE_(string_view, __VA_ARGS__) + + ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_(extern); + + } // namespace str_format_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_ARG_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/bind.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/bind.h new file mode 100644 index 
00000000..668eab68 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/bind.h @@ -0,0 +1,276 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_ + +#include +#include +#include +#include + +#include "absl/base/port.h" +#include "absl/container/inlined_vector.h" +#include "absl/strings/internal/str_format/arg.h" +#include "absl/strings/internal/str_format/checker.h" +#include "absl/strings/internal/str_format/parser.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class UntypedFormatSpec; + + namespace str_format_internal + { + + class BoundConversion : public FormatConversionSpecImpl + { + public: + const FormatArgImpl* arg() const + { + return arg_; + } + void set_arg(const FormatArgImpl* a) + { + arg_ = a; + } + + private: + const FormatArgImpl* arg_; + }; + + // This is the type-erased class that the implementation uses. 
+ class UntypedFormatSpecImpl + { + public: + UntypedFormatSpecImpl() = delete; + + explicit UntypedFormatSpecImpl(string_view s) : + data_(s.data()), + size_(s.size()) + { + } + explicit UntypedFormatSpecImpl( + const str_format_internal::ParsedFormatBase* pc + ) : + data_(pc), + size_(~size_t{}) + { + } + + bool has_parsed_conversion() const + { + return size_ == ~size_t{}; + } + + string_view str() const + { + assert(!has_parsed_conversion()); + return string_view(static_cast(data_), size_); + } + const str_format_internal::ParsedFormatBase* parsed_conversion() const + { + assert(has_parsed_conversion()); + return static_cast(data_); + } + + template + static const UntypedFormatSpecImpl& Extract(const T& s) + { + return s.spec_; + } + + private: + const void* data_; + size_t size_; + }; + + template + struct MakeDependent + { + using type = T; + }; + + // Implicitly convertible from `const char*`, `string_view`, and the + // `ExtendedParsedFormat` type. This abstraction allows all format functions to + // operate on any without providing too many overloads. 
+ template + class FormatSpecTemplate : public MakeDependent::type + { + using Base = typename MakeDependent::type; + + template + struct ErrorMaker + { + constexpr bool operator()(int) const + { + return res; + } + }; + + template + static constexpr bool CheckArity(ErrorMaker SpecifierCount = {}, ErrorMaker ParametersPassed = {}) + { + static_assert(SpecifierCount(i) == ParametersPassed(j), "Number of arguments passed must match the number of " + "conversion specifiers."); + return true; + } + + template + static constexpr bool CheckMatch( + ErrorMaker MismatchedArgumentNumber = {} + ) + { + static_assert(MismatchedArgumentNumber(arg), "Passed argument must match specified format."); + return true; + } + + template + static bool CheckMatches(absl::index_sequence) + { + bool res[] = {true, CheckMatch()...}; + (void)res; + return true; + } + + public: +#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + + // Honeypot overload for when the string is not constexpr. + // We use the 'unavailable' attribute to give a better compiler error than + // just 'method is deleted'. + FormatSpecTemplate(...) // NOLINT + __attribute__((unavailable("Format string is not constexpr."))); + + // Honeypot overload for when the format is constexpr and invalid. + // We use the 'unavailable' attribute to give a better compiler error than + // just 'method is deleted'. + // To avoid checking the format twice, we just check that the format is + // constexpr. If it is valid, then the overload below will kick in. + // We add the template here to make this overload have lower priority. + template + FormatSpecTemplate(const char* s) // NOLINT + __attribute__(( + enable_if(str_format_internal::EnsureConstexpr(s), "constexpr trap"), + unavailable( + "Format specified does not match the arguments passed." 
+ ) + )); + + template + FormatSpecTemplate(string_view s) // NOLINT + __attribute__((enable_if(str_format_internal::EnsureConstexpr(s), "constexpr trap"))) : + Base("to avoid noise in the compiler error") + { + static_assert(sizeof(T*) == 0, "Format specified does not match the arguments passed."); + } + + // Good format overload. + FormatSpecTemplate(const char* s) // NOLINT + __attribute__((enable_if(ValidFormatImpl(s), "bad format trap"))) : + Base(s) + { + } + + FormatSpecTemplate(string_view s) // NOLINT + __attribute__((enable_if(ValidFormatImpl(s), "bad format trap"))) : + Base(s) + { + } + +#else // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + + FormatSpecTemplate(const char* s) : + Base(s) + { + } // NOLINT + FormatSpecTemplate(string_view s) : + Base(s) + { + } // NOLINT + +#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + + template + FormatSpecTemplate(const ExtendedParsedFormat& pc) // NOLINT + : + Base(&pc) + { + CheckArity(); + CheckMatches(absl::make_index_sequence{}); + } + }; + + class Streamable + { + public: + Streamable(const UntypedFormatSpecImpl& format, absl::Span args) : + format_(format), + args_(args.begin(), args.end()) + { + } + + std::ostream& Print(std::ostream& os) const; + + friend std::ostream& operator<<(std::ostream& os, const Streamable& l) + { + return l.Print(os); + } + + private: + const UntypedFormatSpecImpl& format_; + absl::InlinedVector args_; + }; + + // for testing + std::string Summarize(UntypedFormatSpecImpl format, absl::Span args); + bool BindWithPack(const UnboundConversion* props, absl::Span pack, BoundConversion* bound); + + bool FormatUntyped(FormatRawSinkImpl raw_sink, UntypedFormatSpecImpl format, absl::Span args); + + std::string& AppendPack(std::string* out, UntypedFormatSpecImpl format, absl::Span args); + + std::string FormatPack(const UntypedFormatSpecImpl format, absl::Span args); + + int FprintF(std::FILE* output, UntypedFormatSpecImpl format, absl::Span args); + int SnprintF(char* output, size_t size, 
UntypedFormatSpecImpl format, absl::Span args); + + // Returned by Streamed(v). Converts via '%s' to the std::string created + // by std::ostream << v. + template + class StreamedWrapper + { + public: + explicit StreamedWrapper(const T& v) : + v_(v) + { + } + + private: + template + friend ArgConvertResult + FormatConvertImpl(const StreamedWrapper& v, FormatConversionSpecImpl conv, FormatSinkImpl* out); + const T& v_; + }; + + } // namespace str_format_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_BIND_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/checker.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/checker.h new file mode 100644 index 00000000..b905700a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/checker.h @@ -0,0 +1,118 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_ + +#include + +#include "absl/base/attributes.h" +#include "absl/strings/internal/str_format/arg.h" +#include "absl/strings/internal/str_format/constexpr_parser.h" +#include "absl/strings/internal/str_format/extension.h" + +// Compile time check support for entry points. + +#ifndef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER +// We disable format checker under vscode intellisense compilation. 
+// See https://github.com/microsoft/vscode-cpptools/issues/3683 for +// more details. +#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) && \ + !defined(__INTELLISENSE__) +#define ABSL_INTERNAL_ENABLE_FORMAT_CHECKER 1 +#endif // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) && + // !defined(__INTELLISENSE__) +#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace str_format_internal + { + +#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + + template + constexpr bool ValidFormatImpl(string_view format) + { + int next_arg = 0; + const char* p = format.data(); + const char* const end = p + format.size(); + constexpr FormatConversionCharSet + kAllowedConvs[(std::max)(sizeof...(C), size_t{1})] = {C...}; + bool used[(std::max)(sizeof...(C), size_t{1})]{}; + constexpr int kNumArgs = sizeof...(C); + while (p != end) + { + while (p != end && *p != '%') + ++p; + if (p == end) + { + break; + } + if (p + 1 >= end) + return false; + if (p[1] == '%') + { + // %% + p += 2; + continue; + } + + UnboundConversion conv(absl::kConstInit); + p = ConsumeUnboundConversion(p + 1, end, &conv, &next_arg); + if (p == nullptr) + return false; + if (conv.arg_position <= 0 || conv.arg_position > kNumArgs) + { + return false; + } + if (!Contains(kAllowedConvs[conv.arg_position - 1], conv.conv)) + { + return false; + } + used[conv.arg_position - 1] = true; + for (auto extra : {conv.width, conv.precision}) + { + if (extra.is_from_arg()) + { + int pos = extra.get_from_arg(); + if (pos <= 0 || pos > kNumArgs) + return false; + used[pos - 1] = true; + if (!Contains(kAllowedConvs[pos - 1], '*')) + { + return false; + } + } + } + } + if (sizeof...(C) != 0) + { + for (bool b : used) + { + if (!b) + return false; + } + } + return true; + } + +#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + + } // namespace str_format_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_CHECKER_H_ diff 
--git a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/constexpr_parser.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/constexpr_parser.h new file mode 100644 index 00000000..a58f7aeb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/constexpr_parser.h @@ -0,0 +1,676 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_CONSTEXPR_PARSER_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_CONSTEXPR_PARSER_H_ + +#include +#include +#include + +#include "absl/base/const_init.h" +#include "absl/strings/internal/str_format/extension.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace str_format_internal + { + + enum class LengthMod : std::uint8_t + { + h, + hh, + l, + ll, + L, + j, + z, + t, + q, + none + }; + + // The analyzed properties of a single specified conversion. + struct UnboundConversion + { + // This is a user defined default constructor on purpose to skip the + // initialization of parts of the object that are not necessary. + UnboundConversion() + { + } // NOLINT + + // This constructor is provided for the static checker. We don't want to do + // the unnecessary initialization in the normal case. 
+ explicit constexpr UnboundConversion(absl::ConstInitType) : + arg_position{}, + width{}, + precision{} + { + } + + class InputValue + { + public: + constexpr void set_value(int value) + { + assert(value >= 0); + value_ = value; + } + constexpr int value() const + { + return value_; + } + + // Marks the value as "from arg". aka the '*' format. + // Requires `value >= 1`. + // When set, is_from_arg() return true and get_from_arg() returns the + // original value. + // `value()`'s return value is unspecified in this state. + constexpr void set_from_arg(int value) + { + assert(value > 0); + value_ = -value - 1; + } + constexpr bool is_from_arg() const + { + return value_ < -1; + } + constexpr int get_from_arg() const + { + assert(is_from_arg()); + return -value_ - 1; + } + + private: + int value_ = -1; + }; + + // No need to initialize. It will always be set in the parser. + int arg_position; + + InputValue width; + InputValue precision; + + Flags flags = Flags::kBasic; + LengthMod length_mod = LengthMod::none; + FormatConversionChar conv = FormatConversionCharInternal::kNone; + }; + + // Helper tag class for the table below. + // It allows fast `char -> ConversionChar/LengthMod/Flags` checking and + // conversions. 
+ class ConvTag + { + public: + constexpr ConvTag(FormatConversionChar conversion_char) // NOLINT + : + tag_(static_cast(conversion_char)) + { + } + constexpr ConvTag(LengthMod length_mod) // NOLINT + : + tag_(0x80 | static_cast(length_mod)) + { + } + constexpr ConvTag(Flags flags) // NOLINT + : + tag_(0xc0 | static_cast(flags)) + { + } + constexpr ConvTag() : + tag_(0xFF) + { + } + + constexpr bool is_conv() const + { + return (tag_ & 0x80) == 0; + } + constexpr bool is_length() const + { + return (tag_ & 0xC0) == 0x80; + } + constexpr bool is_flags() const + { + return (tag_ & 0xE0) == 0xC0; + } + + constexpr FormatConversionChar as_conv() const + { + assert(is_conv()); + assert(!is_length()); + assert(!is_flags()); + return static_cast(tag_); + } + constexpr LengthMod as_length() const + { + assert(!is_conv()); + assert(is_length()); + assert(!is_flags()); + return static_cast(tag_ & 0x3F); + } + constexpr Flags as_flags() const + { + assert(!is_conv()); + assert(!is_length()); + assert(is_flags()); + return static_cast(tag_ & 0x1F); + } + + private: + uint8_t tag_; + }; + + struct ConvTagHolder + { + using CC = FormatConversionCharInternal; + using LM = LengthMod; + + // Abbreviations to fit in the table below. + static constexpr auto kFSign = Flags::kSignCol; + static constexpr auto kFAlt = Flags::kAlt; + static constexpr auto kFPos = Flags::kShowPos; + static constexpr auto kFLeft = Flags::kLeft; + static constexpr auto kFZero = Flags::kZero; + + static constexpr ConvTag value[256] = { + {}, {}, {}, {}, {}, {}, {}, {}, // 00-07 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 08-0f + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 10-17 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 18-1f + kFSign, + {}, + {}, + kFAlt, + {}, + {}, + {}, + {}, // !"#$%&' + {}, + {}, + {}, + kFPos, + {}, + kFLeft, + {}, + {}, // ()*+,-./ + kFZero, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 01234567 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 89:;<=>? 
+ {}, + CC::A, + {}, + {}, + {}, + CC::E, + CC::F, + CC::G, // @ABCDEFG + {}, + {}, + {}, + {}, + LM::L, + {}, + {}, + {}, // HIJKLMNO + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // PQRSTUVW + CC::X, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // XYZ[\]^_ + {}, + CC::a, + {}, + CC::c, + CC::d, + CC::e, + CC::f, + CC::g, // `abcdefg + LM::h, + CC::i, + LM::j, + {}, + LM::l, + {}, + CC::n, + CC::o, // hijklmno + CC::p, + LM::q, + {}, + CC::s, + LM::t, + CC::u, + CC::v, + {}, // pqrstuvw + CC::x, + {}, + LM::z, + {}, + {}, + {}, + {}, + {}, // xyz{|}! + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 80-87 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 88-8f + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 90-97 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // 98-9f + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // a0-a7 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // a8-af + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // b0-b7 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // b8-bf + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // c0-c7 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // c8-cf + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // d0-d7 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // d8-df + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // e0-e7 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // e8-ef + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // f0-f7 + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, // f8-ff + }; + }; + + // Keep a single table for all the conversion chars and length modifiers. 
+ constexpr ConvTag GetTagForChar(char c) + { + return ConvTagHolder::value[static_cast(c)]; + } + + constexpr bool CheckFastPathSetting(const UnboundConversion& conv) + { + bool width_precision_needed = + conv.width.value() >= 0 || conv.precision.value() >= 0; + if (width_precision_needed && conv.flags == Flags::kBasic) + { +#if defined(__clang__) + // Some compilers complain about this in constexpr even when not executed, + // so only enable the error dump in clang. + fprintf(stderr, "basic=%d left=%d show_pos=%d sign_col=%d alt=%d zero=%d " + "width=%d precision=%d\n", + conv.flags == Flags::kBasic ? 1 : 0, + FlagsContains(conv.flags, Flags::kLeft) ? 1 : 0, + FlagsContains(conv.flags, Flags::kShowPos) ? 1 : 0, + FlagsContains(conv.flags, Flags::kSignCol) ? 1 : 0, + FlagsContains(conv.flags, Flags::kAlt) ? 1 : 0, + FlagsContains(conv.flags, Flags::kZero) ? 1 : 0, + conv.width.value(), + conv.precision.value()); +#endif // defined(__clang__) + return false; + } + return true; + } + + constexpr int ParseDigits(char& c, const char*& pos, const char* const end) + { + int digits = c - '0'; + // We do not want to overflow `digits` so we consume at most digits10 + // digits. If there are more digits the parsing will fail later on when the + // digit doesn't match the expected characters. + int num_digits = std::numeric_limits::digits10; + for (;;) + { + if (ABSL_PREDICT_FALSE(pos == end)) + break; + c = *pos++; + if ('0' > c || c > '9') + break; + --num_digits; + if (ABSL_PREDICT_FALSE(!num_digits)) + break; + digits = 10 * digits + c - '0'; + } + return digits; + } + + template + constexpr const char* ConsumeConversion(const char* pos, const char* const end, UnboundConversion* conv, int* next_arg) + { + const char* const original_pos = pos; + char c = 0; + // Read the next char into `c` and update `pos`. Returns false if there are + // no more chars to read. 
+#define ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR() \ + do \ + { \ + if (ABSL_PREDICT_FALSE(pos == end)) \ + return nullptr; \ + c = *pos++; \ + } while (0) + + if (is_positional) + { + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) + return nullptr; + conv->arg_position = ParseDigits(c, pos, end); + assert(conv->arg_position > 0); + if (ABSL_PREDICT_FALSE(c != '$')) + return nullptr; + } + + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + + // We should start with the basic flag on. + assert(conv->flags == Flags::kBasic); + + // Any non alpha character makes this conversion not basic. + // This includes flags (-+ #0), width (1-9, *) or precision (.). + // All conversion characters and length modifiers are alpha characters. + if (c < 'A') + { + while (c <= '0') + { + auto tag = GetTagForChar(c); + if (tag.is_flags()) + { + conv->flags = conv->flags | tag.as_flags(); + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + } + else + { + break; + } + } + + if (c <= '9') + { + if (c >= '0') + { + int maybe_width = ParseDigits(c, pos, end); + if (!is_positional && c == '$') + { + if (ABSL_PREDICT_FALSE(*next_arg != 0)) + return nullptr; + // Positional conversion. 
+ *next_arg = -1; + return ConsumeConversion(original_pos, end, conv, next_arg); + } + conv->flags = conv->flags | Flags::kNonBasic; + conv->width.set_value(maybe_width); + } + else if (c == '*') + { + conv->flags = conv->flags | Flags::kNonBasic; + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + if (is_positional) + { + if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) + return nullptr; + conv->width.set_from_arg(ParseDigits(c, pos, end)); + if (ABSL_PREDICT_FALSE(c != '$')) + return nullptr; + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + } + else + { + conv->width.set_from_arg(++*next_arg); + } + } + } + + if (c == '.') + { + conv->flags = conv->flags | Flags::kNonBasic; + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + if ('0' <= c && c <= '9') + { + conv->precision.set_value(ParseDigits(c, pos, end)); + } + else if (c == '*') + { + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + if (is_positional) + { + if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) + return nullptr; + conv->precision.set_from_arg(ParseDigits(c, pos, end)); + if (c != '$') + return nullptr; + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + } + else + { + conv->precision.set_from_arg(++*next_arg); + } + } + else + { + conv->precision.set_value(0); + } + } + } + + auto tag = GetTagForChar(c); + + if (ABSL_PREDICT_FALSE(c == 'v' && conv->flags != Flags::kBasic)) + { + return nullptr; + } + + if (ABSL_PREDICT_FALSE(!tag.is_conv())) + { + if (ABSL_PREDICT_FALSE(!tag.is_length())) + return nullptr; + + // It is a length modifier. 
+ using str_format_internal::LengthMod; + LengthMod length_mod = tag.as_length(); + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + if (c == 'h' && length_mod == LengthMod::h) + { + conv->length_mod = LengthMod::hh; + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + } + else if (c == 'l' && length_mod == LengthMod::l) + { + conv->length_mod = LengthMod::ll; + ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR(); + } + else + { + conv->length_mod = length_mod; + } + tag = GetTagForChar(c); + + if (ABSL_PREDICT_FALSE(c == 'v')) + return nullptr; + if (ABSL_PREDICT_FALSE(!tag.is_conv())) + return nullptr; + } +#undef ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR + + assert(CheckFastPathSetting(*conv)); + (void)(&CheckFastPathSetting); + + conv->conv = tag.as_conv(); + if (!is_positional) + conv->arg_position = ++*next_arg; + return pos; + } + + // Consume conversion spec prefix (not including '%') of [p, end) if valid. + // Examples of valid specs would be e.g.: "s", "d", "-12.6f". + // If valid, it returns the first character following the conversion spec, + // and the spec part is broken down and returned in 'conv'. + // If invalid, returns nullptr. + constexpr const char* ConsumeUnboundConversion(const char* p, const char* end, UnboundConversion* conv, int* next_arg) + { + if (*next_arg < 0) + return ConsumeConversion(p, end, conv, next_arg); + return ConsumeConversion(p, end, conv, next_arg); + } + + } // namespace str_format_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_CONSTEXPR_PARSER_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/extension.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/extension.h new file mode 100644 index 00000000..377533cc --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/extension.h @@ -0,0 +1,552 @@ +// +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_EXTENSION_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_EXTENSION_H_ + +#include + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/port.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/internal/str_format/output.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + enum class FormatConversionChar : uint8_t; + enum class FormatConversionCharSet : uint64_t; + + namespace str_format_internal + { + + class FormatRawSinkImpl + { + public: + // Implicitly convert from any type that provides the hook function as + // described above. + template(), string_view()))* = nullptr> + FormatRawSinkImpl(T* raw) // NOLINT + : + sink_(raw), + write_(&FormatRawSinkImpl::Flush) + { + } + + void Write(string_view s) + { + write_(sink_, s); + } + + template + static FormatRawSinkImpl Extract(T s) + { + return s.sink_; + } + + private: + template + static void Flush(void* r, string_view s) + { + str_format_internal::InvokeFlush(static_cast(r), s); + } + + void* sink_; + void (*write_)(void*, string_view); + }; + + // An abstraction to which conversions write their string data. 
+ class FormatSinkImpl + { + public: + explicit FormatSinkImpl(FormatRawSinkImpl raw) : + raw_(raw) + { + } + + ~FormatSinkImpl() + { + Flush(); + } + + void Flush() + { + raw_.Write(string_view(buf_, static_cast(pos_ - buf_))); + pos_ = buf_; + } + + void Append(size_t n, char c) + { + if (n == 0) + return; + size_ += n; + auto raw_append = [&](size_t count) + { + memset(pos_, c, count); + pos_ += count; + }; + while (n > Avail()) + { + n -= Avail(); + if (Avail() > 0) + { + raw_append(Avail()); + } + Flush(); + } + raw_append(n); + } + + void Append(string_view v) + { + size_t n = v.size(); + if (n == 0) + return; + size_ += n; + if (n >= Avail()) + { + Flush(); + raw_.Write(v); + return; + } + memcpy(pos_, v.data(), n); + pos_ += n; + } + + size_t size() const + { + return size_; + } + + // Put 'v' to 'sink' with specified width, precision, and left flag. + bool PutPaddedString(string_view v, int width, int precision, bool left); + + template + T Wrap() + { + return T(this); + } + + template + static FormatSinkImpl* Extract(T* s) + { + return s->sink_; + } + + private: + size_t Avail() const + { + return static_cast(buf_ + sizeof(buf_) - pos_); + } + + FormatRawSinkImpl raw_; + size_t size_ = 0; + char* pos_ = buf_; + char buf_[1024]; + }; + + enum class Flags : uint8_t + { + kBasic = 0, + kLeft = 1 << 0, + kShowPos = 1 << 1, + kSignCol = 1 << 2, + kAlt = 1 << 3, + kZero = 1 << 4, + // This is not a real flag. It just exists to turn off kBasic when no other + // flags are set. This is for when width/precision are specified. 
+ kNonBasic = 1 << 5, + }; + + constexpr Flags operator|(Flags a, Flags b) + { + return static_cast(static_cast(a) | static_cast(b)); + } + + constexpr bool FlagsContains(Flags haystack, Flags needle) + { + return (static_cast(haystack) & static_cast(needle)) == + static_cast(needle); + } + + std::string FlagsToString(Flags v); + + inline std::ostream& operator<<(std::ostream& os, Flags v) + { + return os << FlagsToString(v); + } + +// clang-format off +#define ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(X_VAL, X_SEP) \ + /* text */ \ + X_VAL(c) X_SEP X_VAL(s) X_SEP \ + /* ints */ \ + X_VAL(d) X_SEP X_VAL(i) X_SEP X_VAL(o) X_SEP \ + X_VAL(u) X_SEP X_VAL(x) X_SEP X_VAL(X) X_SEP \ + /* floats */ \ + X_VAL(f) X_SEP X_VAL(F) X_SEP X_VAL(e) X_SEP X_VAL(E) X_SEP \ + X_VAL(g) X_SEP X_VAL(G) X_SEP X_VAL(a) X_SEP X_VAL(A) X_SEP \ + /* misc */ \ + X_VAL(n) X_SEP X_VAL(p) X_SEP X_VAL(v) + // clang-format on + + // This type should not be referenced, it exists only to provide labels + // internally that match the values declared in FormatConversionChar in + // str_format.h. This is meant to allow internal libraries to use the same + // declared interface type as the public interface + // (absl::StrFormatConversionChar) while keeping the definition in a public + // header. + // Internal libraries should use the form + // `FormatConversionCharInternal::c`, `FormatConversionCharInternal::kNone` for + // comparisons. Use in switch statements is not recommended due to a bug in how + // gcc 4.9 -Wswitch handles declared but undefined enums. 
+ struct FormatConversionCharInternal + { + FormatConversionCharInternal() = delete; + + private: + // clang-format off + enum class Enum : uint8_t { + c, s, // text + d, i, o, u, x, X, // int + f, F, e, E, g, G, a, A, // float + n, p, v, // misc + kNone + }; + // clang-format on + + public: +#define ABSL_INTERNAL_X_VAL(id) \ + static constexpr FormatConversionChar id = \ + static_cast(Enum::id); + ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, ) +#undef ABSL_INTERNAL_X_VAL + static constexpr FormatConversionChar kNone = + static_cast(Enum::kNone); + }; + // clang-format on + + inline FormatConversionChar FormatConversionCharFromChar(char c) + { + switch (c) + { +#define ABSL_INTERNAL_X_VAL(id) \ + case #id[0]: \ + return FormatConversionCharInternal::id; + ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, ) +#undef ABSL_INTERNAL_X_VAL + } + return FormatConversionCharInternal::kNone; + } + + inline bool FormatConversionCharIsUpper(FormatConversionChar c) + { + if (c == FormatConversionCharInternal::X || + c == FormatConversionCharInternal::F || + c == FormatConversionCharInternal::E || + c == FormatConversionCharInternal::G || + c == FormatConversionCharInternal::A) + { + return true; + } + else + { + return false; + } + } + + inline bool FormatConversionCharIsFloat(FormatConversionChar c) + { + if (c == FormatConversionCharInternal::a || + c == FormatConversionCharInternal::e || + c == FormatConversionCharInternal::f || + c == FormatConversionCharInternal::g || + c == FormatConversionCharInternal::A || + c == FormatConversionCharInternal::E || + c == FormatConversionCharInternal::F || + c == FormatConversionCharInternal::G) + { + return true; + } + else + { + return false; + } + } + + inline char FormatConversionCharToChar(FormatConversionChar c) + { + if (c == FormatConversionCharInternal::kNone) + { + return '\0'; + +#define ABSL_INTERNAL_X_VAL(e) \ + } \ + else if (c == FormatConversionCharInternal::e) \ + { \ + return #e[0]; +#define 
ABSL_INTERNAL_X_SEP + ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, ABSL_INTERNAL_X_SEP) + } + else + { + return '\0'; + } + +#undef ABSL_INTERNAL_X_VAL +#undef ABSL_INTERNAL_X_SEP + } + + // The associated char. + inline std::ostream& operator<<(std::ostream& os, FormatConversionChar v) + { + char c = FormatConversionCharToChar(v); + if (!c) + c = '?'; + return os << c; + } + + struct FormatConversionSpecImplFriend; + + class FormatConversionSpecImpl + { + public: + // Width and precision are not specified, no flags are set. + bool is_basic() const + { + return flags_ == Flags::kBasic; + } + bool has_left_flag() const + { + return FlagsContains(flags_, Flags::kLeft); + } + bool has_show_pos_flag() const + { + return FlagsContains(flags_, Flags::kShowPos); + } + bool has_sign_col_flag() const + { + return FlagsContains(flags_, Flags::kSignCol); + } + bool has_alt_flag() const + { + return FlagsContains(flags_, Flags::kAlt); + } + bool has_zero_flag() const + { + return FlagsContains(flags_, Flags::kZero); + } + + FormatConversionChar conversion_char() const + { + // Keep this field first in the struct . It generates better code when + // accessing it when ConversionSpec is passed by value in registers. + static_assert(offsetof(FormatConversionSpecImpl, conv_) == 0, ""); + return conv_; + } + + void set_conversion_char(FormatConversionChar c) + { + conv_ = c; + } + + // Returns the specified width. If width is unspecfied, it returns a negative + // value. + int width() const + { + return width_; + } + // Returns the specified precision. If precision is unspecfied, it returns a + // negative value. 
+ int precision() const + { + return precision_; + } + + template + T Wrap() + { + return T(*this); + } + + private: + friend struct str_format_internal::FormatConversionSpecImplFriend; + FormatConversionChar conv_ = FormatConversionCharInternal::kNone; + Flags flags_; + int width_; + int precision_; + }; + + struct FormatConversionSpecImplFriend final + { + static void SetFlags(Flags f, FormatConversionSpecImpl* conv) + { + conv->flags_ = f; + } + static void SetConversionChar(FormatConversionChar c, FormatConversionSpecImpl* conv) + { + conv->conv_ = c; + } + static void SetWidth(int w, FormatConversionSpecImpl* conv) + { + conv->width_ = w; + } + static void SetPrecision(int p, FormatConversionSpecImpl* conv) + { + conv->precision_ = p; + } + static std::string FlagsToString(const FormatConversionSpecImpl& spec) + { + return str_format_internal::FlagsToString(spec.flags_); + } + }; + + // Type safe OR operator. + // We need this for two reasons: + // 1. operator| on enums makes them decay to integers and the result is an + // integer. We need the result to stay as an enum. + // 2. We use "enum class" which would not work even if we accepted the decay. + constexpr FormatConversionCharSet FormatConversionCharSetUnion( + FormatConversionCharSet a + ) + { + return a; + } + + template + constexpr FormatConversionCharSet FormatConversionCharSetUnion( + FormatConversionCharSet a, CharSet... rest + ) + { + return static_cast( + static_cast(a) | + static_cast(FormatConversionCharSetUnion(rest...)) + ); + } + + constexpr uint64_t FormatConversionCharToConvInt(FormatConversionChar c) + { + return uint64_t{1} << (1 + static_cast(c)); + } + + constexpr uint64_t FormatConversionCharToConvInt(char conv) + { + return +#define ABSL_INTERNAL_CHAR_SET_CASE(c) \ + conv == #c[0] ? FormatConversionCharToConvInt(FormatConversionCharInternal::c): + ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, ) +#undef ABSL_INTERNAL_CHAR_SET_CASE + conv == '*' ? 
+ 1 : + 0; + } + + constexpr FormatConversionCharSet FormatConversionCharToConvValue(char conv) + { + return static_cast( + FormatConversionCharToConvInt(conv) + ); + } + + struct FormatConversionCharSetInternal + { +#define ABSL_INTERNAL_CHAR_SET_CASE(c) \ + static constexpr FormatConversionCharSet c = \ + FormatConversionCharToConvValue(#c[0]); + ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, ) +#undef ABSL_INTERNAL_CHAR_SET_CASE + + // Used for width/precision '*' specification. + static constexpr FormatConversionCharSet kStar = + FormatConversionCharToConvValue('*'); + + static constexpr FormatConversionCharSet kIntegral = + FormatConversionCharSetUnion(d, i, u, o, x, X); + static constexpr FormatConversionCharSet kFloating = + FormatConversionCharSetUnion(a, e, f, g, A, E, F, G); + static constexpr FormatConversionCharSet kNumeric = + FormatConversionCharSetUnion(kIntegral, kFloating); + static constexpr FormatConversionCharSet kPointer = p; + }; + + // Type safe OR operator. + // We need this for two reasons: + // 1. operator| on enums makes them decay to integers and the result is an + // integer. We need the result to stay as an enum. + // 2. We use "enum class" which would not work even if we accepted the decay. + constexpr FormatConversionCharSet operator|(FormatConversionCharSet a, FormatConversionCharSet b) + { + return FormatConversionCharSetUnion(a, b); + } + + // Overloaded conversion functions to support absl::ParsedFormat. + // Get a conversion with a single character in it. + constexpr FormatConversionCharSet ToFormatConversionCharSet(char c) + { + return static_cast( + FormatConversionCharToConvValue(c) + ); + } + + // Get a conversion with a single character in it. + constexpr FormatConversionCharSet ToFormatConversionCharSet( + FormatConversionCharSet c + ) + { + return c; + } + + template + void ToFormatConversionCharSet(T) = delete; + + // Checks whether `c` exists in `set`. 
+ constexpr bool Contains(FormatConversionCharSet set, char c) + { + return (static_cast(set) & + static_cast(FormatConversionCharToConvValue(c))) != 0; + } + + // Checks whether all the characters in `c` are contained in `set` + constexpr bool Contains(FormatConversionCharSet set, FormatConversionCharSet c) + { + return (static_cast(set) & static_cast(c)) == + static_cast(c); + } + + // Checks whether all the characters in `c` are contained in `set` + constexpr bool Contains(FormatConversionCharSet set, FormatConversionChar c) + { + return (static_cast(set) & FormatConversionCharToConvInt(c)) != 0; + } + + // Return capacity - used, clipped to a minimum of 0. + inline size_t Excess(size_t used, size_t capacity) + { + return used < capacity ? capacity - used : 0; + } + + } // namespace str_format_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_EXTENSION_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/float_conversion.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/float_conversion.h new file mode 100644 index 00000000..61bc504c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/float_conversion.h @@ -0,0 +1,36 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_ + +#include "absl/strings/internal/str_format/extension.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace str_format_internal + { + + bool ConvertFloatImpl(float v, const FormatConversionSpecImpl& conv, FormatSinkImpl* sink); + + bool ConvertFloatImpl(double v, const FormatConversionSpecImpl& conv, FormatSinkImpl* sink); + + bool ConvertFloatImpl(long double v, const FormatConversionSpecImpl& conv, FormatSinkImpl* sink); + + } // namespace str_format_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_FLOAT_CONVERSION_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/output.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/output.h new file mode 100644 index 00000000..1a063195 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/output.h @@ -0,0 +1,122 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Output extension hooks for the Format library. +// `internal::InvokeFlush` calls the appropriate flush function for the +// specified output argument. +// `BufferRawSink` is a simple output sink for a char buffer. Used by SnprintF. +// `FILERawSink` is a std::FILE* based sink. Used by PrintF and FprintF. 
+ +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_ + +#include +#include +#include +#include + +#include "absl/base/port.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace str_format_internal + { + + // RawSink implementation that writes into a char* buffer. + // It will not overflow the buffer, but will keep the total count of chars + // that would have been written. + class BufferRawSink + { + public: + BufferRawSink(char* buffer, size_t size) : + buffer_(buffer), + size_(size) + { + } + + size_t total_written() const + { + return total_written_; + } + void Write(string_view v); + + private: + char* buffer_; + size_t size_; + size_t total_written_ = 0; + }; + + // RawSink implementation that writes into a FILE*. + // It keeps track of the total number of bytes written and any error encountered + // during the writes. + class FILERawSink + { + public: + explicit FILERawSink(std::FILE* output) : + output_(output) + { + } + + void Write(string_view v); + + size_t count() const + { + return count_; + } + int error() const + { + return error_; + } + + private: + std::FILE* output_; + int error_ = 0; + size_t count_ = 0; + }; + + // Provide RawSink integration with common types from the STL. + inline void AbslFormatFlush(std::string* out, string_view s) + { + out->append(s.data(), s.size()); + } + inline void AbslFormatFlush(std::ostream* out, string_view s) + { + out->write(s.data(), static_cast(s.size())); + } + + inline void AbslFormatFlush(FILERawSink* sink, string_view v) + { + sink->Write(v); + } + + inline void AbslFormatFlush(BufferRawSink* sink, string_view v) + { + sink->Write(v); + } + + // This is a SFINAE to get a better compiler error message when the type + // is not supported. 
+ template + auto InvokeFlush(T* out, string_view s) -> decltype(AbslFormatFlush(out, s)) + { + AbslFormatFlush(out, s); + } + + } // namespace str_format_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_format/parser.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/parser.h new file mode 100644 index 00000000..3f0347ea --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_format/parser.h @@ -0,0 +1,317 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_ +#define ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/strings/internal/str_format/checker.h" +#include "absl/strings/internal/str_format/constexpr_parser.h" +#include "absl/strings/internal/str_format/extension.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace str_format_internal + { + + std::string LengthModToString(LengthMod v); + + const char* ConsumeUnboundConversionNoInline(const char* p, const char* end, UnboundConversion* conv, int* next_arg); + + // Parse the format string provided in 'src' and pass the identified items into + // 'consumer'. 
+ // Text runs will be passed by calling + // Consumer::Append(string_view); + // ConversionItems will be passed by calling + // Consumer::ConvertOne(UnboundConversion, string_view); + // In the case of ConvertOne, the string_view that is passed is the + // portion of the format string corresponding to the conversion, not including + // the leading %. On success, it returns true. On failure, it stops and returns + // false. + template + bool ParseFormatString(string_view src, Consumer consumer) + { + int next_arg = 0; + const char* p = src.data(); + const char* const end = p + src.size(); + while (p != end) + { + const char* percent = + static_cast(memchr(p, '%', static_cast(end - p))); + if (!percent) + { + // We found the last substring. + return consumer.Append(string_view(p, static_cast(end - p))); + } + // We found a percent, so push the text run then process the percent. + if (ABSL_PREDICT_FALSE(!consumer.Append( + string_view(p, static_cast(percent - p)) + ))) + { + return false; + } + if (ABSL_PREDICT_FALSE(percent + 1 >= end)) + return false; + + auto tag = GetTagForChar(percent[1]); + if (tag.is_conv()) + { + if (ABSL_PREDICT_FALSE(next_arg < 0)) + { + // This indicates an error in the format string. + // The only way to get `next_arg < 0` here is to have a positional + // argument first which sets next_arg to -1 and then a non-positional + // argument. + return false; + } + p = percent + 2; + + // Keep this case separate from the one below. + // ConvertOne is more efficient when the compiler can see that the `basic` + // flag is set. 
+ UnboundConversion conv; + conv.conv = tag.as_conv(); + conv.arg_position = ++next_arg; + if (ABSL_PREDICT_FALSE( + !consumer.ConvertOne(conv, string_view(percent + 1, 1)) + )) + { + return false; + } + } + else if (percent[1] != '%') + { + UnboundConversion conv; + p = ConsumeUnboundConversionNoInline(percent + 1, end, &conv, &next_arg); + if (ABSL_PREDICT_FALSE(p == nullptr)) + return false; + if (ABSL_PREDICT_FALSE(!consumer.ConvertOne( + conv, string_view(percent + 1, static_cast(p - (percent + 1))) + ))) + { + return false; + } + } + else + { + if (ABSL_PREDICT_FALSE(!consumer.Append("%"))) + return false; + p = percent + 2; + continue; + } + } + return true; + } + + // Always returns true, or fails to compile in a constexpr context if s does not + // point to a constexpr char array. + constexpr bool EnsureConstexpr(string_view s) + { + return s.empty() || s[0] == s[0]; + } + + class ParsedFormatBase + { + public: + explicit ParsedFormatBase( + string_view format, bool allow_ignored, std::initializer_list convs + ); + + ParsedFormatBase(const ParsedFormatBase& other) + { + *this = other; + } + + ParsedFormatBase(ParsedFormatBase&& other) + { + *this = std::move(other); + } + + ParsedFormatBase& operator=(const ParsedFormatBase& other) + { + if (this == &other) + return *this; + has_error_ = other.has_error_; + items_ = other.items_; + size_t text_size = items_.empty() ? 0 : items_.back().text_end; + data_.reset(new char[text_size]); + memcpy(data_.get(), other.data_.get(), text_size); + return *this; + } + + ParsedFormatBase& operator=(ParsedFormatBase&& other) + { + if (this == &other) + return *this; + has_error_ = other.has_error_; + data_ = std::move(other.data_); + items_ = std::move(other.items_); + // Reset the vector to make sure the invariants hold. 
+ other.items_.clear(); + return *this; + } + + template + bool ProcessFormat(Consumer consumer) const + { + const char* const base = data_.get(); + string_view text(base, 0); + for (const auto& item : items_) + { + const char* const end = text.data() + text.size(); + text = + string_view(end, static_cast((base + item.text_end) - end)); + if (item.is_conversion) + { + if (!consumer.ConvertOne(item.conv, text)) + return false; + } + else + { + if (!consumer.Append(text)) + return false; + } + } + return !has_error_; + } + + bool has_error() const + { + return has_error_; + } + + private: + // Returns whether the conversions match and if !allow_ignored it verifies + // that all conversions are used by the format. + bool MatchesConversions( + bool allow_ignored, + std::initializer_list convs + ) const; + + struct ParsedFormatConsumer; + + struct ConversionItem + { + bool is_conversion; + // Points to the past-the-end location of this element in the data_ array. + size_t text_end; + UnboundConversion conv; + }; + + bool has_error_; + std::unique_ptr data_; + std::vector items_; + }; + + // A value type representing a preparsed format. These can be created, copied + // around, and reused to speed up formatting loops. + // The user must specify through the template arguments the conversion + // characters used in the format. This will be checked at compile time. + // + // This class uses Conv enum values to specify each argument. + // This allows for more flexibility as you can specify multiple possible + // conversion characters for each argument. + // ParsedFormat is a simplified alias for when the user only + // needs to specify a single conversion character for each argument. 
+ // + // Example: + // // Extended format supports multiple characters per argument: + // using MyFormat = ExtendedParsedFormat; + // MyFormat GetFormat(bool use_hex) { + // if (use_hex) return MyFormat("foo %x bar"); + // return MyFormat("foo %d bar"); + // } + // // 'format' can be used with any value that supports 'd' and 'x', + // // like `int`. + // auto format = GetFormat(use_hex); + // value = StringF(format, i); + // + // This class also supports runtime format checking with the ::New() and + // ::NewAllowIgnored() factory functions. + // This is the only API that allows the user to pass a runtime specified format + // string. These factory functions will return NULL if the format does not match + // the conversions requested by the user. + template + class ExtendedParsedFormat : public str_format_internal::ParsedFormatBase + { + public: + explicit ExtendedParsedFormat(string_view format) +#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + __attribute__(( + enable_if(str_format_internal::EnsureConstexpr(format), "Format string is not constexpr."), + enable_if(str_format_internal::ValidFormatImpl(format), "Format specified does not match the template arguments.") + )) +#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER + : + ExtendedParsedFormat(format, false) + { + } + + // ExtendedParsedFormat factory function. + // The user still has to specify the conversion characters, but they will not + // be checked at compile time. Instead, it will be checked at runtime. + // This delays the checking to runtime, but allows the user to pass + // dynamically sourced formats. + // It returns NULL if the format does not match the conversion characters. + // The user is responsible for checking the return value before using it. + // + // The 'New' variant will check that all the specified arguments are being + // consumed by the format and return NULL if any argument is being ignored. 
+ // The 'NewAllowIgnored' variant will not verify this and will allow formats + // that ignore arguments. + static std::unique_ptr New(string_view format) + { + return New(format, false); + } + static std::unique_ptr NewAllowIgnored( + string_view format + ) + { + return New(format, true); + } + + private: + static std::unique_ptr New(string_view format, bool allow_ignored) + { + std::unique_ptr conv( + new ExtendedParsedFormat(format, allow_ignored) + ); + if (conv->has_error()) + return nullptr; + return conv; + } + + ExtendedParsedFormat(string_view s, bool allow_ignored) : + ParsedFormatBase(s, allow_ignored, {C...}) + { + } + }; + } // namespace str_format_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_FORMAT_PARSER_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_join_internal.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_join_internal.h new file mode 100644 index 00000000..f4ed31b1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_join_internal.h @@ -0,0 +1,359 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This file declares INTERNAL parts of the Join API that are inlined/templated +// or otherwise need to be available at compile time. 
The main abstractions +// defined in this file are: +// +// - A handful of default Formatters +// - JoinAlgorithm() overloads +// - JoinRange() overloads +// - JoinTuple() +// +// DO NOT INCLUDE THIS FILE DIRECTLY. Use this file by including +// absl/strings/str_join.h +// +// IWYU pragma: private, include "absl/strings/str_join.h" + +#ifndef ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_ +#define ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/internal/ostringstream.h" +#include "absl/strings/internal/resize_uninitialized.h" +#include "absl/strings/str_cat.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // + // Formatter objects + // + // The following are implementation classes for standard Formatter objects. The + // factory functions that users will call to create and use these formatters are + // defined and documented in strings/join.h. + // + + // The default formatter. Converts alpha-numeric types to strings. + struct AlphaNumFormatterImpl + { + // This template is needed in order to support passing in a dereferenced + // vector::iterator + template + void operator()(std::string* out, const T& t) const + { + StrAppend(out, AlphaNum(t)); + } + + void operator()(std::string* out, const AlphaNum& t) const + { + StrAppend(out, t); + } + }; + + // A type that's used to overload the JoinAlgorithm() function (defined below) + // for ranges that do not require additional formatting (e.g., a range of + // strings). + + struct NoFormatter : public AlphaNumFormatterImpl + { + }; + + // Formats types to strings using the << operator. + class StreamFormatterImpl + { + public: + // The method isn't const because it mutates state. Making it const will + // render StreamFormatterImpl thread-hostile. 
+ template + void operator()(std::string* out, const T& t) + { + // The stream is created lazily to avoid paying the relatively high cost + // of its construction when joining an empty range. + if (strm_) + { + strm_->clear(); // clear the bad, fail and eof bits in case they were set + strm_->str(out); + } + else + { + strm_.reset(new strings_internal::OStringStream(out)); + } + *strm_ << t; + } + + private: + std::unique_ptr strm_; + }; + + // Formats a std::pair<>. The 'first' member is formatted using f1_ and the + // 'second' member is formatted using f2_. sep_ is the separator. + template + class PairFormatterImpl + { + public: + PairFormatterImpl(F1 f1, absl::string_view sep, F2 f2) : + f1_(std::move(f1)), + sep_(sep), + f2_(std::move(f2)) + { + } + + template + void operator()(std::string* out, const T& p) + { + f1_(out, p.first); + out->append(sep_); + f2_(out, p.second); + } + + template + void operator()(std::string* out, const T& p) const + { + f1_(out, p.first); + out->append(sep_); + f2_(out, p.second); + } + + private: + F1 f1_; + std::string sep_; + F2 f2_; + }; + + // Wraps another formatter and dereferences the argument to operator() then + // passes the dereferenced argument to the wrapped formatter. This can be + // useful, for example, to join a std::vector. + template + class DereferenceFormatterImpl + { + public: + DereferenceFormatterImpl() : + f_() + { + } + explicit DereferenceFormatterImpl(Formatter&& f) : + f_(std::forward(f)) + { + } + + template + void operator()(std::string* out, const T& t) + { + f_(out, *t); + } + + template + void operator()(std::string* out, const T& t) const + { + f_(out, *t); + } + + private: + Formatter f_; + }; + + // DefaultFormatter is a traits class that selects a default Formatter to use + // for the given type T. The ::Type member names the Formatter to use. This is + // used by the strings::Join() functions that do NOT take a Formatter argument, + // in which case a default Formatter must be chosen. 
+ // + // AlphaNumFormatterImpl is the default in the base template, followed by + // specializations for other types. + template + struct DefaultFormatter + { + typedef AlphaNumFormatterImpl Type; + }; + template<> + struct DefaultFormatter + { + typedef AlphaNumFormatterImpl Type; + }; + template<> + struct DefaultFormatter + { + typedef AlphaNumFormatterImpl Type; + }; + template<> + struct DefaultFormatter + { + typedef NoFormatter Type; + }; + template<> + struct DefaultFormatter + { + typedef NoFormatter Type; + }; + template + struct DefaultFormatter + { + typedef DereferenceFormatterImpl::Type> + Type; + }; + + template + struct DefaultFormatter> : public DefaultFormatter + { + }; + + // + // JoinAlgorithm() functions + // + + // The main joining algorithm. This simply joins the elements in the given + // iterator range, each separated by the given separator, into an output string, + // and formats each element using the provided Formatter object. + template + std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s, Formatter&& f) + { + std::string result; + absl::string_view sep(""); + for (Iterator it = start; it != end; ++it) + { + result.append(sep.data(), sep.size()); + f(&result, *it); + sep = s; + } + return result; + } + + // A joining algorithm that's optimized for a forward iterator range of + // string-like objects that do not need any additional formatting. This is to + // optimize the common case of joining, say, a std::vector or a + // std::vector. + // + // This is an overload of the previous JoinAlgorithm() function. Here the + // Formatter argument is of type NoFormatter. Since NoFormatter is an internal + // type, this overload is only invoked when strings::Join() is called with a + // range of string-like objects (e.g., std::string, absl::string_view), and an + // explicit Formatter argument was NOT specified. 
+ // + // The optimization is that the needed space will be reserved in the output + // string to avoid the need to resize while appending. To do this, the iterator + // range will be traversed twice: once to calculate the total needed size, and + // then again to copy the elements and delimiters to the output string. + template::iterator_category, std::forward_iterator_tag>::value>::type> + std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s, NoFormatter) + { + std::string result; + if (start != end) + { + // Sums size + auto&& start_value = *start; + size_t result_size = start_value.size(); + for (Iterator it = start; ++it != end;) + { + result_size += s.size(); + result_size += (*it).size(); + } + + if (result_size > 0) + { + STLStringResizeUninitialized(&result, result_size); + + // Joins strings + char* result_buf = &*result.begin(); + + memcpy(result_buf, start_value.data(), start_value.size()); + result_buf += start_value.size(); + for (Iterator it = start; ++it != end;) + { + memcpy(result_buf, s.data(), s.size()); + result_buf += s.size(); + auto&& value = *it; + memcpy(result_buf, value.data(), value.size()); + result_buf += value.size(); + } + } + } + + return result; + } + + // JoinTupleLoop implements a loop over the elements of a std::tuple, which + // are heterogeneous. The primary template matches the tuple interior case. It + // continues the iteration after appending a separator (for nonzero indices) + // and formatting an element of the tuple. The specialization for the I=N case + // matches the end-of-tuple, and terminates the iteration. 
+ template + struct JoinTupleLoop + { + template + void operator()(std::string* out, const Tup& tup, absl::string_view sep, Formatter&& fmt) + { + if (I > 0) + out->append(sep.data(), sep.size()); + fmt(out, std::get(tup)); + JoinTupleLoop()(out, tup, sep, fmt); + } + }; + template + struct JoinTupleLoop + { + template + void operator()(std::string*, const Tup&, absl::string_view, Formatter&&) + { + } + }; + + template + std::string JoinAlgorithm(const std::tuple& tup, absl::string_view sep, Formatter&& fmt) + { + std::string result; + JoinTupleLoop<0, sizeof...(T)>()(&result, tup, sep, fmt); + return result; + } + + template + std::string JoinRange(Iterator first, Iterator last, absl::string_view separator) + { + // No formatter was explicitly given, so a default must be chosen. + typedef typename std::iterator_traits::value_type ValueType; + typedef typename DefaultFormatter::Type Formatter; + return JoinAlgorithm(first, last, separator, Formatter()); + } + + template + std::string JoinRange(const Range& range, absl::string_view separator, Formatter&& fmt) + { + using std::begin; + using std::end; + return JoinAlgorithm(begin(range), end(range), separator, fmt); + } + + template + std::string JoinRange(const Range& range, absl::string_view separator) + { + using std::begin; + using std::end; + return JoinRange(begin(range), end(range), separator); + } + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_JOIN_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/str_split_internal.h b/CAPI/cpp/grpc/include/absl/strings/internal/str_split_internal.h new file mode 100644 index 00000000..aae6b195 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/str_split_internal.h @@ -0,0 +1,578 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This file declares INTERNAL parts of the Split API that are inline/templated +// or otherwise need to be available at compile time. The main abstractions +// defined in here are +// +// - ConvertibleToStringView +// - SplitIterator<> +// - Splitter<> +// +// DO NOT INCLUDE THIS FILE DIRECTLY. Use this file by including +// absl/strings/str_split.h. +// +// IWYU pragma: private, include "absl/strings/str_split.h" + +#ifndef ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_ +#define ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +#ifdef _GLIBCXX_DEBUG +#include "absl/strings/internal/stl_type_traits.h" +#endif // _GLIBCXX_DEBUG + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // This class is implicitly constructible from everything that absl::string_view + // is implicitly constructible from, except for rvalue strings. This means it + // can be used as a function parameter in places where passing a temporary + // string might cause memory lifetime issues. 
+ class ConvertibleToStringView + { + public: + ConvertibleToStringView(const char* s) // NOLINT(runtime/explicit) + : + value_(s) + { + } + ConvertibleToStringView(char* s) : + value_(s) + { + } // NOLINT(runtime/explicit) + ConvertibleToStringView(absl::string_view s) // NOLINT(runtime/explicit) + : + value_(s) + { + } + ConvertibleToStringView(const std::string& s) // NOLINT(runtime/explicit) + : + value_(s) + { + } + + // Disable conversion from rvalue strings. + ConvertibleToStringView(std::string&& s) = delete; + ConvertibleToStringView(const std::string&& s) = delete; + + absl::string_view value() const + { + return value_; + } + + private: + absl::string_view value_; + }; + + // An iterator that enumerates the parts of a string from a Splitter. The text + // to be split, the Delimiter, and the Predicate are all taken from the given + // Splitter object. Iterators may only be compared if they refer to the same + // Splitter instance. + // + // This class is NOT part of the public splitting API. + template + class SplitIterator + { + public: + using iterator_category = std::input_iterator_tag; + using value_type = absl::string_view; + using difference_type = ptrdiff_t; + using pointer = const value_type*; + using reference = const value_type&; + + enum State + { + kInitState, + kLastState, + kEndState + }; + SplitIterator(State state, const Splitter* splitter) : + pos_(0), + state_(state), + splitter_(splitter), + delimiter_(splitter->delimiter()), + predicate_(splitter->predicate()) + { + // Hack to maintain backward compatibility. This one block makes it so an + // empty absl::string_view whose .data() happens to be nullptr behaves + // *differently* from an otherwise empty absl::string_view whose .data() is + // not nullptr. This is an undesirable difference in general, but this + // behavior is maintained to avoid breaking existing code that happens to + // depend on this old behavior/bug. Perhaps it will be fixed one day. 
The + // difference in behavior is as follows: + // Split(absl::string_view(""), '-'); // {""} + // Split(absl::string_view(), '-'); // {} + if (splitter_->text().data() == nullptr) + { + state_ = kEndState; + pos_ = splitter_->text().size(); + return; + } + + if (state_ == kEndState) + { + pos_ = splitter_->text().size(); + } + else + { + ++(*this); + } + } + + bool at_end() const + { + return state_ == kEndState; + } + + reference operator*() const + { + return curr_; + } + pointer operator->() const + { + return &curr_; + } + + SplitIterator& operator++() + { + do + { + if (state_ == kLastState) + { + state_ = kEndState; + return *this; + } + const absl::string_view text = splitter_->text(); + const absl::string_view d = delimiter_.Find(text, pos_); + if (d.data() == text.data() + text.size()) + state_ = kLastState; + curr_ = text.substr(pos_, static_cast(d.data() - (text.data() + pos_))); + pos_ += curr_.size() + d.size(); + } while (!predicate_(curr_)); + return *this; + } + + SplitIterator operator++(int) + { + SplitIterator old(*this); + ++(*this); + return old; + } + + friend bool operator==(const SplitIterator& a, const SplitIterator& b) + { + return a.state_ == b.state_ && a.pos_ == b.pos_; + } + + friend bool operator!=(const SplitIterator& a, const SplitIterator& b) + { + return !(a == b); + } + + private: + size_t pos_; + State state_; + absl::string_view curr_; + const Splitter* splitter_; + typename Splitter::DelimiterType delimiter_; + typename Splitter::PredicateType predicate_; + }; + + // HasMappedType::value is true iff there exists a type T::mapped_type. + template + struct HasMappedType : std::false_type + { + }; + template + struct HasMappedType> : std::true_type + { + }; + + // HasValueType::value is true iff there exists a type T::value_type. + template + struct HasValueType : std::false_type + { + }; + template + struct HasValueType> : std::true_type + { + }; + + // HasConstIterator::value is true iff there exists a type T::const_iterator. 
+ template + struct HasConstIterator : std::false_type + { + }; + template + struct HasConstIterator> : std::true_type + { + }; + + // HasEmplace::value is true iff there exists a method T::emplace(). + template + struct HasEmplace : std::false_type + { + }; + template + struct HasEmplace().emplace())>> : std::true_type + { + }; + + // IsInitializerList::value is true iff T is an std::initializer_list. More + // details below in Splitter<> where this is used. + std::false_type IsInitializerListDispatch(...); // default: No + template + std::true_type IsInitializerListDispatch(std::initializer_list*); + template + struct IsInitializerList : decltype(IsInitializerListDispatch(static_cast(nullptr))) + { + }; + + // A SplitterIsConvertibleTo::type alias exists iff the specified condition + // is true for type 'C'. + // + // Restricts conversion to container-like types (by testing for the presence of + // a const_iterator member type) and also to disable conversion to an + // std::initializer_list (which also has a const_iterator). Otherwise, code + // compiled in C++11 will get an error due to ambiguous conversion paths (in + // C++11 std::vector::operator= is overloaded to take either a std::vector + // or an std::initializer_list). 
+ + template + struct SplitterIsConvertibleToImpl : std::false_type + { + }; + + template + struct SplitterIsConvertibleToImpl : std::is_constructible + { + }; + + template + struct SplitterIsConvertibleToImpl : absl::conjunction, std::is_constructible> + { + }; + + template + struct SplitterIsConvertibleTo : SplitterIsConvertibleToImpl::value && +#endif // _GLIBCXX_DEBUG + !IsInitializerList::type>::value && HasValueType::value && HasConstIterator::value, + HasMappedType::value> + { + }; + + template + struct ShouldUseLifetimeBound : std::false_type + { + }; + + template + struct ShouldUseLifetimeBound< + StringType, + Container, + std::enable_if_t< + std::is_same::value && + std::is_same::value>> : std::true_type + { + }; + + template + using ShouldUseLifetimeBoundForPair = std::integral_constant< + bool, + std::is_same::value && + (std::is_same::value || + std::is_same::value)>; + + // This class implements the range that is returned by absl::StrSplit(). This + // class has templated conversion operators that allow it to be implicitly + // converted to a variety of types that the caller may have specified on the + // left-hand side of an assignment. + // + // The main interface for interacting with this class is through its implicit + // conversion operators. However, this class may also be used like a container + // in that it has .begin() and .end() member functions. It may also be used + // within a range-for loop. + // + // Output containers can be collections of any type that is constructible from + // an absl::string_view. + // + // An Predicate functor may be supplied. This predicate will be used to filter + // the split strings: only strings for which the predicate returns true will be + // kept. A Predicate object is any unary functor that takes an absl::string_view + // and returns bool. 
+ // + // The StringType parameter can be either string_view or string, depending on + // whether the Splitter refers to a string stored elsewhere, or if the string + // resides inside the Splitter itself. + template + class Splitter + { + public: + using DelimiterType = Delimiter; + using PredicateType = Predicate; + using const_iterator = strings_internal::SplitIterator; + using value_type = typename std::iterator_traits::value_type; + + Splitter(StringType input_text, Delimiter d, Predicate p) : + text_(std::move(input_text)), + delimiter_(std::move(d)), + predicate_(std::move(p)) + { + } + + absl::string_view text() const + { + return text_; + } + const Delimiter& delimiter() const + { + return delimiter_; + } + const Predicate& predicate() const + { + return predicate_; + } + + // Range functions that iterate the split substrings as absl::string_view + // objects. These methods enable a Splitter to be used in a range-based for + // loop. + const_iterator begin() const + { + return {const_iterator::kInitState, this}; + } + const_iterator end() const + { + return {const_iterator::kEndState, this}; + } + + // An implicit conversion operator that is restricted to only those containers + // that the splitter is convertible to. + template< + typename Container, + std::enable_if_t::value && SplitterIsConvertibleTo::value, std::nullptr_t> = nullptr> + // NOLINTNEXTLINE(google-explicit-constructor) + operator Container() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return ConvertToContainer::value>()(*this); + } + + template< + typename Container, + std::enable_if_t::value && SplitterIsConvertibleTo::value, std::nullptr_t> = nullptr> + // NOLINTNEXTLINE(google-explicit-constructor) + operator Container() const + { + return ConvertToContainer::value>()(*this); + } + + // Returns a pair with its .first and .second members set to the first two + // strings returned by the begin() iterator. 
Either/both of .first and .second + // will be constructed with empty strings if the iterator doesn't have a + // corresponding value. + template::value, std::nullptr_t> = nullptr> + // NOLINTNEXTLINE(google-explicit-constructor) + operator std::pair() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return ConvertToPair(); + } + + template::value, std::nullptr_t> = nullptr> + // NOLINTNEXTLINE(google-explicit-constructor) + operator std::pair() const + { + return ConvertToPair(); + } + + private: + template + std::pair ConvertToPair() const + { + absl::string_view first, second; + auto it = begin(); + if (it != end()) + { + first = *it; + if (++it != end()) + { + second = *it; + } + } + return {First(first), Second(second)}; + } + + // ConvertToContainer is a functor converting a Splitter to the requested + // Container of ValueType. It is specialized below to optimize splitting to + // certain combinations of Container and ValueType. + // + // This base template handles the generic case of storing the split results in + // the requested non-map-like container and converting the split substrings to + // the requested type. + template + struct ConvertToContainer + { + Container operator()(const Splitter& splitter) const + { + Container c; + auto it = std::inserter(c, c.end()); + for (const auto& sp : splitter) + { + *it++ = ValueType(sp); + } + return c; + } + }; + + // Partial specialization for a std::vector. + // + // Optimized for the common case of splitting to a + // std::vector. In this case we first split the results to + // a small array of absl::string_view on the stack, to reduce reallocations. 
+ template + struct ConvertToContainer, absl::string_view, false> + { + std::vector operator()( + const Splitter& splitter + ) const + { + struct raw_view + { + const char* data; + size_t size; + operator absl::string_view() const + { // NOLINT(runtime/explicit) + return {data, size}; + } + }; + std::vector v; + std::array ar; + for (auto it = splitter.begin(); !it.at_end();) + { + size_t index = 0; + do + { + ar[index].data = it->data(); + ar[index].size = it->size(); + ++it; + } while (++index != ar.size() && !it.at_end()); + v.insert(v.end(), ar.begin(), ar.begin() + index); + } + return v; + } + }; + + // Partial specialization for a std::vector. + // + // Optimized for the common case of splitting to a std::vector. + // In this case we first split the results to a std::vector + // so the returned std::vector can have space reserved to avoid + // std::string moves. + template + struct ConvertToContainer, std::string, false> + { + std::vector operator()(const Splitter& splitter) const + { + const std::vector v = splitter; + return std::vector(v.begin(), v.end()); + } + }; + + // Partial specialization for containers of pairs (e.g., maps). + // + // The algorithm is to insert a new pair into the map for each even-numbered + // item, with the even-numbered item as the key with a default-constructed + // value. Each odd-numbered item will then be assigned to the last pair's + // value. + template + struct ConvertToContainer, true> + { + using iterator = typename Container::iterator; + + Container operator()(const Splitter& splitter) const + { + Container m; + iterator it; + bool insert = true; + for (const absl::string_view sv : splitter) + { + if (insert) + { + it = InsertOrEmplace(&m, sv); + } + else + { + it->second = Second(sv); + } + insert = !insert; + } + return m; + } + + // Inserts the key and an empty value into the map, returning an iterator to + // the inserted item. We use emplace() if available, otherwise insert(). 
+ template + static absl::enable_if_t::value, iterator> InsertOrEmplace( + M* m, absl::string_view key + ) + { + // Use piecewise_construct to support old versions of gcc in which pair + // constructor can't otherwise construct string from string_view. + return ToIter(m->emplace(std::piecewise_construct, std::make_tuple(key), std::tuple<>())); + } + template + static absl::enable_if_t::value, iterator> InsertOrEmplace( + M* m, absl::string_view key + ) + { + return ToIter(m->insert(std::make_pair(First(key), Second("")))); + } + + static iterator ToIter(std::pair pair) + { + return pair.first; + } + static iterator ToIter(iterator iter) + { + return iter; + } + }; + + StringType text_; + Delimiter delimiter_; + Predicate predicate_; + }; + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/string_constant.h b/CAPI/cpp/grpc/include/absl/strings/internal/string_constant.h new file mode 100644 index 00000000..3a51970b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/string_constant.h @@ -0,0 +1,79 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_ +#define ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_ + +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // StringConstant represents a compile time string constant. + // It can be accessed via its `absl::string_view value` static member. + // It is guaranteed that the `string_view` returned has constant `.data()`, + // constant `.size()` and constant `value[i]` for all `0 <= i < .size()` + // + // The `T` is an opaque type. It is guaranteed that different string constants + // will have different values of `T`. This allows users to associate the string + // constant with other static state at compile time. + // + // Instances should be made using the `MakeStringConstant()` factory function + // below. + template + struct StringConstant + { + private: + static constexpr bool TryConstexprEval(absl::string_view view) + { + return view.empty() || 2 * view[0] != 1; + } + + public: + static constexpr absl::string_view value = T{}(); + constexpr absl::string_view operator()() const + { + return value; + } + + // Check to be sure `view` points to constant data. + // Otherwise, it can't be constant evaluated. + static_assert(TryConstexprEval(value), "The input string_view must point to constant data."); + }; + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL + template + constexpr absl::string_view StringConstant::value; +#endif + + // Factory function for `StringConstant` instances. + // It supports callables that have a constexpr default constructor and a + // constexpr operator(). + // It must return an `absl::string_view` or `const char*` pointing to constant + // data. This is validated at compile time. 
+ template + constexpr StringConstant MakeStringConstant(T) + { + return {}; + } + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/stringify_sink.h b/CAPI/cpp/grpc/include/absl/strings/internal/stringify_sink.h new file mode 100644 index 00000000..f51b3a53 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/stringify_sink.h @@ -0,0 +1,62 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_ +#define ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_ + +#include +#include +#include + +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace strings_internal + { + class StringifySink + { + public: + void Append(size_t count, char ch); + + void Append(string_view v); + + // Support `absl::Format(&sink, format, args...)`. 
+ friend void AbslFormatFlush(StringifySink* sink, absl::string_view v) + { + sink->Append(v); + } + + private: + template + friend string_view ExtractStringification(StringifySink& sink, const T& v); + + std::string buffer_; + }; + + template + string_view ExtractStringification(StringifySink& sink, const T& v) + { + AbslStringify(sink, v); + return sink.buffer_; + } + + } // namespace strings_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_STRINGIFY_SINK_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/internal/utf8.h b/CAPI/cpp/grpc/include/absl/strings/internal/utf8.h new file mode 100644 index 00000000..394a4c83 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/internal/utf8.h @@ -0,0 +1,55 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// UTF8 utilities, implemented to reduce dependencies. + +#ifndef ABSL_STRINGS_INTERNAL_UTF8_H_ +#define ABSL_STRINGS_INTERNAL_UTF8_H_ + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace strings_internal + { + + // For Unicode code points 0 through 0x10FFFF, EncodeUTF8Char writes + // out the UTF-8 encoding into buffer, and returns the number of chars + // it wrote. 
+ // + // As described in https://tools.ietf.org/html/rfc3629#section-3 , the encodings + // are: + // 00 - 7F : 0xxxxxxx + // 80 - 7FF : 110xxxxx 10xxxxxx + // 800 - FFFF : 1110xxxx 10xxxxxx 10xxxxxx + // 10000 - 10FFFF : 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Values greater than 0x10FFFF are not supported and may or may not write + // characters into buffer, however never will more than kMaxEncodedUTF8Size + // bytes be written, regardless of the value of utf8_char. + enum + { + kMaxEncodedUTF8Size = 4 + }; + size_t EncodeUTF8Char(char* buffer, char32_t utf8_char); + + } // namespace strings_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_INTERNAL_UTF8_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/match.h b/CAPI/cpp/grpc/include/absl/strings/match.h new file mode 100644 index 00000000..74ac9943 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/match.h @@ -0,0 +1,113 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: match.h +// ----------------------------------------------------------------------------- +// +// This file contains simple utilities for performing string matching checks. 
+// All of these function parameters are specified as `absl::string_view`, +// meaning that these functions can accept `std::string`, `absl::string_view` or +// NUL-terminated C-style strings. +// +// Examples: +// std::string s = "foo"; +// absl::string_view sv = "f"; +// assert(absl::StrContains(s, sv)); +// +// Note: The order of parameters in these functions is designed to mimic the +// order an equivalent member function would exhibit; +// e.g. `s.Contains(x)` ==> `absl::StrContains(s, x). +#ifndef ABSL_STRINGS_MATCH_H_ +#define ABSL_STRINGS_MATCH_H_ + +#include + +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // StrContains() + // + // Returns whether a given string `haystack` contains the substring `needle`. + inline bool StrContains(absl::string_view haystack, absl::string_view needle) noexcept + { + return haystack.find(needle, 0) != haystack.npos; + } + + inline bool StrContains(absl::string_view haystack, char needle) noexcept + { + return haystack.find(needle) != haystack.npos; + } + + // StartsWith() + // + // Returns whether a given string `text` begins with `prefix`. + inline bool StartsWith(absl::string_view text, absl::string_view prefix) noexcept + { + return prefix.empty() || + (text.size() >= prefix.size() && + memcmp(text.data(), prefix.data(), prefix.size()) == 0); + } + + // EndsWith() + // + // Returns whether a given string `text` ends with `suffix`. + inline bool EndsWith(absl::string_view text, absl::string_view suffix) noexcept + { + return suffix.empty() || + (text.size() >= suffix.size() && + memcmp(text.data() + (text.size() - suffix.size()), suffix.data(), suffix.size()) == 0); + } + // StrContainsIgnoreCase() + // + // Returns whether a given ASCII string `haystack` contains the ASCII substring + // `needle`, ignoring case in the comparison. 
+ bool StrContainsIgnoreCase(absl::string_view haystack, absl::string_view needle) noexcept; + + bool StrContainsIgnoreCase(absl::string_view haystack, char needle) noexcept; + + // EqualsIgnoreCase() + // + // Returns whether given ASCII strings `piece1` and `piece2` are equal, ignoring + // case in the comparison. + bool EqualsIgnoreCase(absl::string_view piece1, absl::string_view piece2) noexcept; + + // StartsWithIgnoreCase() + // + // Returns whether a given ASCII string `text` starts with `prefix`, + // ignoring case in the comparison. + bool StartsWithIgnoreCase(absl::string_view text, absl::string_view prefix) noexcept; + + // EndsWithIgnoreCase() + // + // Returns whether a given ASCII string `text` ends with `suffix`, ignoring + // case in the comparison. + bool EndsWithIgnoreCase(absl::string_view text, absl::string_view suffix) noexcept; + + // Yields the longest prefix in common between both input strings. + // Pointer-wise, the returned result is a subset of input "a". + absl::string_view FindLongestCommonPrefix(absl::string_view a, absl::string_view b); + + // Yields the longest suffix in common between both input strings. + // Pointer-wise, the returned result is a subset of input "a". + absl::string_view FindLongestCommonSuffix(absl::string_view a, absl::string_view b); + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_MATCH_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/numbers.h b/CAPI/cpp/grpc/include/absl/strings/numbers.h new file mode 100644 index 00000000..511597a2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/numbers.h @@ -0,0 +1,316 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: numbers.h +// ----------------------------------------------------------------------------- +// +// This package contains functions for converting strings to numbers. For +// converting numbers to strings, use `StrCat()` or `StrAppend()` in str_cat.h, +// which automatically detect and convert most number values appropriately. + +#ifndef ABSL_STRINGS_NUMBERS_H_ +#define ABSL_STRINGS_NUMBERS_H_ + +#ifdef __SSSE3__ +#include +#endif + +#ifdef _MSC_VER +#include +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/numeric/bits.h" +#include "absl/numeric/int128.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // SimpleAtoi() + // + // Converts the given string (optionally followed or preceded by ASCII + // whitespace) into an integer value, returning `true` if successful. The string + // must reflect a base-10 integer whose value falls within the range of the + // integer type (optionally preceded by a `+` or `-`). If any errors are + // encountered, this function returns `false`, leaving `out` in an unspecified + // state. 
+ template + ABSL_MUST_USE_RESULT bool SimpleAtoi(absl::string_view str, int_type* out); + + // SimpleAtof() + // + // Converts the given string (optionally followed or preceded by ASCII + // whitespace) into a float, which may be rounded on overflow or underflow, + // returning `true` if successful. + // See https://en.cppreference.com/w/c/string/byte/strtof for details about the + // allowed formats for `str`, except SimpleAtof() is locale-independent and will + // always use the "C" locale. If any errors are encountered, this function + // returns `false`, leaving `out` in an unspecified state. + ABSL_MUST_USE_RESULT bool SimpleAtof(absl::string_view str, float* out); + + // SimpleAtod() + // + // Converts the given string (optionally followed or preceded by ASCII + // whitespace) into a double, which may be rounded on overflow or underflow, + // returning `true` if successful. + // See https://en.cppreference.com/w/c/string/byte/strtof for details about the + // allowed formats for `str`, except SimpleAtod is locale-independent and will + // always use the "C" locale. If any errors are encountered, this function + // returns `false`, leaving `out` in an unspecified state. + ABSL_MUST_USE_RESULT bool SimpleAtod(absl::string_view str, double* out); + + // SimpleAtob() + // + // Converts the given string into a boolean, returning `true` if successful. + // The following case-insensitive strings are interpreted as boolean `true`: + // "true", "t", "yes", "y", "1". The following case-insensitive strings + // are interpreted as boolean `false`: "false", "f", "no", "n", "0". If any + // errors are encountered, this function returns `false`, leaving `out` in an + // unspecified state. + ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str, bool* out); + + // SimpleHexAtoi() + // + // Converts a hexadecimal string (optionally followed or preceded by ASCII + // whitespace) to an integer, returning `true` if successful. 
Only valid base-16 + // hexadecimal integers whose value falls within the range of the integer type + // (optionally preceded by a `+` or `-`) can be converted. A valid hexadecimal + // value may include both upper and lowercase character symbols, and may + // optionally include a leading "0x" (or "0X") number prefix, which is ignored + // by this function. If any errors are encountered, this function returns + // `false`, leaving `out` in an unspecified state. + template + ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str, int_type* out); + + // Overloads of SimpleHexAtoi() for 128 bit integers. + ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, absl::int128* out); + ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, absl::uint128* out); + + ABSL_NAMESPACE_END +} // namespace absl + +// End of public API. Implementation details follow. + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace numbers_internal + { + + // Digit conversion. + ABSL_DLL extern const char kHexChar[17]; // 0123456789abcdef + ABSL_DLL extern const char + kHexTable[513]; // 000102030405060708090a0b0c0d0e0f1011... + + // Writes a two-character representation of 'i' to 'buf'. 'i' must be in the + // range 0 <= i < 100, and buf must have space for two characters. 
Example: + // char buf[2]; + // PutTwoDigits(42, buf); + // // buf[0] == '4' + // // buf[1] == '2' + void PutTwoDigits(uint32_t i, char* buf); + + // safe_strto?() functions for implementing SimpleAtoi() + + bool safe_strto32_base(absl::string_view text, int32_t* value, int base); + bool safe_strto64_base(absl::string_view text, int64_t* value, int base); + bool safe_strto128_base(absl::string_view text, absl::int128* value, int base); + bool safe_strtou32_base(absl::string_view text, uint32_t* value, int base); + bool safe_strtou64_base(absl::string_view text, uint64_t* value, int base); + bool safe_strtou128_base(absl::string_view text, absl::uint128* value, int base); + + static const int kFastToBufferSize = 32; + static const int kSixDigitsToBufferSize = 16; + + // Helper function for fast formatting of floating-point values. + // The result is the same as printf's "%g", a.k.a. "%.6g"; that is, six + // significant digits are returned, trailing zeros are removed, and numbers + // outside the range 0.0001-999999 are output using scientific notation + // (1.23456e+06). This routine is heavily optimized. + // Required buffer size is `kSixDigitsToBufferSize`. + size_t SixDigitsToBuffer(double d, char* buffer); + + // These functions are intended for speed. All functions take an output buffer + // as an argument and return a pointer to the last byte they wrote, which is the + // terminating '\0'. At most `kFastToBufferSize` bytes are written. + char* FastIntToBuffer(int32_t, char*); + char* FastIntToBuffer(uint32_t, char*); + char* FastIntToBuffer(int64_t, char*); + char* FastIntToBuffer(uint64_t, char*); + + // For enums and integer types that are not an exact match for the types above, + // use templates to call the appropriate one of the four overloads above. 
+ template + char* FastIntToBuffer(int_type i, char* buffer) + { + static_assert(sizeof(i) <= 64 / 8, "FastIntToBuffer works only with 64-bit-or-less integers."); + // TODO(jorg): This signed-ness check is used because it works correctly + // with enums, and it also serves to check that int_type is not a pointer. + // If one day something like std::is_signed works, switch to it. + // These conditions are constexpr bools to suppress MSVC warning C4127. + constexpr bool kIsSigned = static_cast(1) - 2 < 0; + constexpr bool kUse64Bit = sizeof(i) > 32 / 8; + if (kIsSigned) + { + if (kUse64Bit) + { + return FastIntToBuffer(static_cast(i), buffer); + } + else + { + return FastIntToBuffer(static_cast(i), buffer); + } + } + else + { + if (kUse64Bit) + { + return FastIntToBuffer(static_cast(i), buffer); + } + else + { + return FastIntToBuffer(static_cast(i), buffer); + } + } + } + + // Implementation of SimpleAtoi, generalized to support arbitrary base (used + // with base different from 10 elsewhere in Abseil implementation). + template + ABSL_MUST_USE_RESULT bool safe_strtoi_base(absl::string_view s, int_type* out, int base) + { + static_assert(sizeof(*out) == 4 || sizeof(*out) == 8, "SimpleAtoi works only with 32-bit or 64-bit integers."); + static_assert(!std::is_floating_point::value, "Use SimpleAtof or SimpleAtod instead."); + bool parsed; + // TODO(jorg): This signed-ness check is used because it works correctly + // with enums, and it also serves to check that int_type is not a pointer. + // If one day something like std::is_signed works, switch to it. + // These conditions are constexpr bools to suppress MSVC warning C4127. 
+ constexpr bool kIsSigned = static_cast(1) - 2 < 0; + constexpr bool kUse64Bit = sizeof(*out) == 64 / 8; + if (kIsSigned) + { + if (kUse64Bit) + { + int64_t val; + parsed = numbers_internal::safe_strto64_base(s, &val, base); + *out = static_cast(val); + } + else + { + int32_t val; + parsed = numbers_internal::safe_strto32_base(s, &val, base); + *out = static_cast(val); + } + } + else + { + if (kUse64Bit) + { + uint64_t val; + parsed = numbers_internal::safe_strtou64_base(s, &val, base); + *out = static_cast(val); + } + else + { + uint32_t val; + parsed = numbers_internal::safe_strtou32_base(s, &val, base); + *out = static_cast(val); + } + } + return parsed; + } + + // FastHexToBufferZeroPad16() + // + // Outputs `val` into `out` as if by `snprintf(out, 17, "%016x", val)` but + // without the terminating null character. Thus `out` must be of length >= 16. + // Returns the number of non-pad digits of the output (it can never be zero + // since 0 has one digit). + inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) + { +#ifdef ABSL_INTERNAL_HAVE_SSSE3 + uint64_t be = absl::big_endian::FromHost64(val); + const auto kNibbleMask = _mm_set1_epi8(0xf); + const auto kHexDigits = _mm_setr_epi8('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'); + auto v = _mm_loadl_epi64(reinterpret_cast<__m128i*>(&be)); // load lo dword + auto v4 = _mm_srli_epi64(v, 4); // shift 4 right + auto il = _mm_unpacklo_epi8(v4, v); // interleave bytes + auto m = _mm_and_si128(il, kNibbleMask); // mask out nibbles + auto hexchars = _mm_shuffle_epi8(kHexDigits, m); // hex chars + _mm_storeu_si128(reinterpret_cast<__m128i*>(out), hexchars); +#else + for (int i = 0; i < 8; ++i) + { + auto byte = (val >> (56 - 8 * i)) & 0xFF; + auto* hex = &absl::numbers_internal::kHexTable[byte * 2]; + std::memcpy(out + 2 * i, hex, 2); + } +#endif + // | 0x1 so that even 0 has 1 digit. 
+ return 16 - static_cast(countl_zero(val | 0x1) / 4); + } + + } // namespace numbers_internal + + template + ABSL_MUST_USE_RESULT bool SimpleAtoi(absl::string_view str, int_type* out) + { + return numbers_internal::safe_strtoi_base(str, out, 10); + } + + ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str, absl::int128* out) + { + return numbers_internal::safe_strto128_base(str, out, 10); + } + + ABSL_MUST_USE_RESULT inline bool SimpleAtoi(absl::string_view str, absl::uint128* out) + { + return numbers_internal::safe_strtou128_base(str, out, 10); + } + + template + ABSL_MUST_USE_RESULT bool SimpleHexAtoi(absl::string_view str, int_type* out) + { + return numbers_internal::safe_strtoi_base(str, out, 16); + } + + ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, absl::int128* out) + { + return numbers_internal::safe_strto128_base(str, out, 16); + } + + ABSL_MUST_USE_RESULT inline bool SimpleHexAtoi(absl::string_view str, absl::uint128* out) + { + return numbers_internal::safe_strtou128_base(str, out, 16); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_NUMBERS_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/str_cat.h b/CAPI/cpp/grpc/include/absl/strings/str_cat.h new file mode 100644 index 00000000..74ef63f5 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/str_cat.h @@ -0,0 +1,591 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: str_cat.h +// ----------------------------------------------------------------------------- +// +// This package contains functions for efficiently concatenating and appending +// strings: `StrCat()` and `StrAppend()`. Most of the work within these routines +// is actually handled through use of a special AlphaNum type, which was +// designed to be used as a parameter type that efficiently manages conversion +// to strings and avoids copies in the above operations. +// +// Any routine accepting either a string or a number may accept `AlphaNum`. +// The basic idea is that by accepting a `const AlphaNum &` as an argument +// to your function, your callers will automagically convert bools, integers, +// and floating point values to strings for you. +// +// NOTE: Use of `AlphaNum` outside of the //absl/strings package is unsupported +// except for the specific case of function parameters of type `AlphaNum` or +// `const AlphaNum &`. In particular, instantiating `AlphaNum` directly as a +// stack variable is not supported. +// +// Conversion from 8-bit values is not accepted because, if it were, then an +// attempt to pass ':' instead of ":" might result in a 58 ending up in your +// result. +// +// Bools convert to "0" or "1". Pointers to types other than `char *` are not +// valid inputs. No output is generated for null `char *` pointers. +// +// Floating point numbers are formatted with six-digit precision, which is +// the default for "std::cout <<" or printf "%g" (the same as "%.6g"). +// +// You can convert to hexadecimal output rather than decimal output using the +// `Hex` type contained here. To do so, pass `Hex(my_int)` as a parameter to +// `StrCat()` or `StrAppend()`. You may specify a minimum hex field width using +// a `PadSpec` enum. +// +// User-defined types can be formatted with the `AbslStringify()` customization +// point. 
The API relies on detecting an overload in the user-defined type's +// namespace of a free (non-member) `AbslStringify()` function as a definition +// (typically declared as a friend and implemented in-line. +// with the following signature: +// +// class MyClass { ... }; +// +// template +// void AbslStringify(Sink& sink, const MyClass& value); +// +// An `AbslStringify()` overload for a type should only be declared in the same +// file and namespace as said type. +// +// Note that `AbslStringify()` also supports use with `absl::StrFormat()` and +// `absl::Substitute()`. +// +// Example: +// +// struct Point { +// // To add formatting support to `Point`, we simply need to add a free +// // (non-member) function `AbslStringify()`. This method specifies how +// // Point should be printed when absl::StrCat() is called on it. You can add +// // such a free function using a friend declaration within the body of the +// // class. The sink parameter is a templated type to avoid requiring +// // dependencies. +// template friend void AbslStringify(Sink& +// sink, const Point& p) { +// absl::Format(&sink, "(%v, %v)", p.x, p.y); +// } +// +// int x; +// int y; +// }; +// ----------------------------------------------------------------------------- + +#ifndef ABSL_STRINGS_STR_CAT_H_ +#define ABSL_STRINGS_STR_CAT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/port.h" +#include "absl/strings/internal/has_absl_stringify.h" +#include "absl/strings/internal/stringify_sink.h" +#include "absl/strings/numbers.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace strings_internal + { + // AlphaNumBuffer allows a way to pass a string to StrCat without having to do + // memory allocation. It is simply a pair of a fixed-size character array, and + // a size. Please don't use outside of absl, yet. 
+ template + struct AlphaNumBuffer + { + std::array data; + size_t size; + }; + + } // namespace strings_internal + + // Enum that specifies the number of significant digits to return in a `Hex` or + // `Dec` conversion and fill character to use. A `kZeroPad2` value, for example, + // would produce hexadecimal strings such as "0a","0f" and a 'kSpacePad5' value + // would produce hexadecimal strings such as " a"," f". + enum PadSpec : uint8_t + { + kNoPad = 1, + kZeroPad2, + kZeroPad3, + kZeroPad4, + kZeroPad5, + kZeroPad6, + kZeroPad7, + kZeroPad8, + kZeroPad9, + kZeroPad10, + kZeroPad11, + kZeroPad12, + kZeroPad13, + kZeroPad14, + kZeroPad15, + kZeroPad16, + kZeroPad17, + kZeroPad18, + kZeroPad19, + kZeroPad20, + + kSpacePad2 = kZeroPad2 + 64, + kSpacePad3, + kSpacePad4, + kSpacePad5, + kSpacePad6, + kSpacePad7, + kSpacePad8, + kSpacePad9, + kSpacePad10, + kSpacePad11, + kSpacePad12, + kSpacePad13, + kSpacePad14, + kSpacePad15, + kSpacePad16, + kSpacePad17, + kSpacePad18, + kSpacePad19, + kSpacePad20, + }; + + // ----------------------------------------------------------------------------- + // Hex + // ----------------------------------------------------------------------------- + // + // `Hex` stores a set of hexadecimal string conversion parameters for use + // within `AlphaNum` string conversions. 
+ struct Hex + { + uint64_t value; + uint8_t width; + char fill; + + template + explicit Hex( + Int v, PadSpec spec = absl::kNoPad, typename std::enable_if::value>::type* = nullptr + ) : + Hex(spec, static_cast(v)) + { + } + template + explicit Hex( + Int v, PadSpec spec = absl::kNoPad, typename std::enable_if::value>::type* = nullptr + ) : + Hex(spec, static_cast(v)) + { + } + template + explicit Hex( + Int v, PadSpec spec = absl::kNoPad, typename std::enable_if::value>::type* = nullptr + ) : + Hex(spec, static_cast(v)) + { + } + template + explicit Hex( + Int v, PadSpec spec = absl::kNoPad, typename std::enable_if::value>::type* = nullptr + ) : + Hex(spec, static_cast(v)) + { + } + template + explicit Hex(Pointee* v, PadSpec spec = absl::kNoPad) : + Hex(spec, reinterpret_cast(v)) + { + } + + template + friend void AbslStringify(S& sink, Hex hex) + { + static_assert( + numbers_internal::kFastToBufferSize >= 32, + "This function only works when output buffer >= 32 bytes long" + ); + char buffer[numbers_internal::kFastToBufferSize]; + char* const end = &buffer[numbers_internal::kFastToBufferSize]; + auto real_width = + absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16); + if (real_width >= hex.width) + { + sink.Append(absl::string_view(end - real_width, real_width)); + } + else + { + // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and + // max pad width can be up to 20. + std::memset(end - 32, hex.fill, 16); + // Patch up everything else up to the real_width. + std::memset(end - real_width - 16, hex.fill, 16); + sink.Append(absl::string_view(end - hex.width, hex.width)); + } + } + + private: + Hex(PadSpec spec, uint64_t v) : + value(v), + width(spec == absl::kNoPad ? 1 : spec >= absl::kSpacePad2 ? spec - absl::kSpacePad2 + 2 : + spec - absl::kZeroPad2 + 2), + fill(spec >= absl::kSpacePad2 ? 
' ' : '0') + { + } + }; + + // ----------------------------------------------------------------------------- + // Dec + // ----------------------------------------------------------------------------- + // + // `Dec` stores a set of decimal string conversion parameters for use + // within `AlphaNum` string conversions. Dec is slower than the default + // integer conversion, so use it only if you need padding. + struct Dec + { + uint64_t value; + uint8_t width; + char fill; + bool neg; + + template + explicit Dec(Int v, PadSpec spec = absl::kNoPad, typename std::enable_if<(sizeof(Int) <= 8)>::type* = nullptr) : + value(v >= 0 ? static_cast(v) : uint64_t{0} - static_cast(v)), + width(spec == absl::kNoPad ? 1 : spec >= absl::kSpacePad2 ? spec - absl::kSpacePad2 + 2 : + spec - absl::kZeroPad2 + 2), + fill(spec >= absl::kSpacePad2 ? ' ' : '0'), + neg(v < 0) + { + } + + template + friend void AbslStringify(S& sink, Dec dec) + { + assert(dec.width <= numbers_internal::kFastToBufferSize); + char buffer[numbers_internal::kFastToBufferSize]; + char* const end = &buffer[numbers_internal::kFastToBufferSize]; + char* const minfill = end - dec.width; + char* writer = end; + uint64_t val = dec.value; + while (val > 9) + { + *--writer = '0' + (val % 10); + val /= 10; + } + *--writer = '0' + static_cast(val); + if (dec.neg) + *--writer = '-'; + + ptrdiff_t fillers = writer - minfill; + if (fillers > 0) + { + // Tricky: if the fill character is ' ', then it's <+/-> + // But...: if the fill character is '0', then it's <+/-> + bool add_sign_again = false; + if (dec.neg && dec.fill == '0') + { // If filling with '0', + ++writer; // ignore the sign we just added + add_sign_again = true; // and re-add the sign later. 
+ } + writer -= fillers; + std::fill_n(writer, fillers, dec.fill); + if (add_sign_again) + *--writer = '-'; + } + + sink.Append(absl::string_view(writer, static_cast(end - writer))); + } + }; + + // ----------------------------------------------------------------------------- + // AlphaNum + // ----------------------------------------------------------------------------- + // + // The `AlphaNum` class acts as the main parameter type for `StrCat()` and + // `StrAppend()`, providing efficient conversion of numeric, boolean, decimal, + // and hexadecimal values (through the `Dec` and `Hex` types) into strings. + // `AlphaNum` should only be used as a function parameter. Do not instantiate + // `AlphaNum` directly as a stack variable. + + class AlphaNum + { + public: + // No bool ctor -- bools convert to an integral type. + // A bool ctor would also convert incoming pointers (bletch). + + AlphaNum(int x) // NOLINT(runtime/explicit) + : + piece_(digits_, static_cast(numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0])) + { + } + AlphaNum(unsigned int x) // NOLINT(runtime/explicit) + : + piece_(digits_, static_cast(numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0])) + { + } + AlphaNum(long x) // NOLINT(*) + : + piece_(digits_, static_cast(numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0])) + { + } + AlphaNum(unsigned long x) // NOLINT(*) + : + piece_(digits_, static_cast(numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0])) + { + } + AlphaNum(long long x) // NOLINT(*) + : + piece_(digits_, static_cast(numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0])) + { + } + AlphaNum(unsigned long long x) // NOLINT(*) + : + piece_(digits_, static_cast(numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0])) + { + } + + AlphaNum(float f) // NOLINT(runtime/explicit) + : + piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) + { + } + AlphaNum(double f) // NOLINT(runtime/explicit) + : + piece_(digits_, 
numbers_internal::SixDigitsToBuffer(f, digits_)) + { + } + + template + AlphaNum( // NOLINT(runtime/explicit) + const strings_internal::AlphaNumBuffer& buf + ABSL_ATTRIBUTE_LIFETIME_BOUND + ) : + piece_(&buf.data[0], buf.size) + { + } + + AlphaNum(const char* c_str // NOLINT(runtime/explicit) + ABSL_ATTRIBUTE_LIFETIME_BOUND) : + piece_(NullSafeStringView(c_str)) + { + } + AlphaNum(absl::string_view pc // NOLINT(runtime/explicit) + ABSL_ATTRIBUTE_LIFETIME_BOUND) : + piece_(pc) + { + } + + template::value>::type> + AlphaNum( // NOLINT(runtime/explicit) + const T& v ABSL_ATTRIBUTE_LIFETIME_BOUND, + strings_internal::StringifySink&& sink ABSL_ATTRIBUTE_LIFETIME_BOUND = {} + ) : + piece_(strings_internal::ExtractStringification(sink, v)) + { + } + + template + AlphaNum( // NOLINT(runtime/explicit) + const std::basic_string, Allocator>& str + ABSL_ATTRIBUTE_LIFETIME_BOUND + ) : + piece_(str) + { + } + + // Use string literals ":" instead of character literals ':'. + AlphaNum(char c) = delete; // NOLINT(runtime/explicit) + + AlphaNum(const AlphaNum&) = delete; + AlphaNum& operator=(const AlphaNum&) = delete; + + absl::string_view::size_type size() const + { + return piece_.size(); + } + const char* data() const + { + return piece_.data(); + } + absl::string_view Piece() const + { + return piece_; + } + + // Match unscoped enums. Use integral promotion so that a `char`-backed + // enum becomes a wider integral type AlphaNum will accept. + template{} && std::is_convertible{} && !strings_internal::HasAbslStringify::value>::type> + AlphaNum(T e) // NOLINT(runtime/explicit) + : + AlphaNum(+e) + { + } + + // This overload matches scoped enums. We must explicitly cast to the + // underlying type, but use integral promotion for the same reason as above. 
+ template{} && !std::is_convertible{} && !strings_internal::HasAbslStringify::value, + char*>::type = nullptr> + AlphaNum(T e) // NOLINT(runtime/explicit) + : + AlphaNum(+static_cast::type>(e)) + { + } + + // vector::reference and const_reference require special help to + // convert to `AlphaNum` because it requires two user defined conversions. + template< + typename T, + typename std::enable_if< + std::is_class::value && + (std::is_same::reference>::value || + std::is_same::const_reference>::value)>::type* = + nullptr> + AlphaNum(T e) : + AlphaNum(static_cast(e)) + { + } // NOLINT(runtime/explicit) + + private: + absl::string_view piece_; + char digits_[numbers_internal::kFastToBufferSize]; + }; + + // ----------------------------------------------------------------------------- + // StrCat() + // ----------------------------------------------------------------------------- + // + // Merges given strings or numbers, using no delimiter(s), returning the merged + // result as a string. + // + // `StrCat()` is designed to be the fastest possible way to construct a string + // out of a mix of raw C strings, string_views, strings, bool values, + // and numeric values. + // + // Don't use `StrCat()` for user-visible strings. The localization process + // works poorly on strings built up out of fragments. + // + // For clarity and performance, don't use `StrCat()` when appending to a + // string. Use `StrAppend()` instead. In particular, avoid using any of these + // (anti-)patterns: + // + // str.append(StrCat(...)) + // str += StrCat(...) + // str = StrCat(str, ...) + // + // The last case is the worst, with a potential to change a loop + // from a linear time operation with O(1) dynamic allocations into a + // quadratic time operation with O(n) dynamic allocations. + // + // See `StrAppend()` below for more information. + + namespace strings_internal + { + + // Do not call directly - this is not part of the public API. 
+ std::string CatPieces(std::initializer_list pieces); + void AppendPieces(std::string* dest, std::initializer_list pieces); + + } // namespace strings_internal + + ABSL_MUST_USE_RESULT inline std::string StrCat() + { + return std::string(); + } + + ABSL_MUST_USE_RESULT inline std::string StrCat(const AlphaNum& a) + { + return std::string(a.data(), a.size()); + } + + ABSL_MUST_USE_RESULT std::string StrCat(const AlphaNum& a, const AlphaNum& b); + ABSL_MUST_USE_RESULT std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c); + ABSL_MUST_USE_RESULT std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d); + + // Support 5 or more arguments + template + ABSL_MUST_USE_RESULT inline std::string StrCat( + const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d, const AlphaNum& e, const AV&... args + ) + { + return strings_internal::CatPieces( + {a.Piece(), b.Piece(), c.Piece(), d.Piece(), e.Piece(), static_cast(args).Piece()...} + ); + } + + // ----------------------------------------------------------------------------- + // StrAppend() + // ----------------------------------------------------------------------------- + // + // Appends a string or set of strings to an existing string, in a similar + // fashion to `StrCat()`. + // + // WARNING: `StrAppend(&str, a, b, c, ...)` requires that none of the + // a, b, c, parameters be a reference into str. For speed, `StrAppend()` does + // not try to check each of its input arguments to be sure that they are not + // a subset of the string being appended to. 
That is, while this will work: + // + // std::string s = "foo"; + // s += s; + // + // This output is undefined: + // + // std::string s = "foo"; + // StrAppend(&s, s); + // + // This output is undefined as well, since `absl::string_view` does not own its + // data: + // + // std::string s = "foobar"; + // absl::string_view p = s; + // StrAppend(&s, p); + + inline void StrAppend(std::string*) + { + } + void StrAppend(std::string* dest, const AlphaNum& a); + void StrAppend(std::string* dest, const AlphaNum& a, const AlphaNum& b); + void StrAppend(std::string* dest, const AlphaNum& a, const AlphaNum& b, const AlphaNum& c); + void StrAppend(std::string* dest, const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d); + + // Support 5 or more arguments + template + inline void StrAppend(std::string* dest, const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d, const AlphaNum& e, const AV&... args) + { + strings_internal::AppendPieces( + dest, {a.Piece(), b.Piece(), c.Piece(), d.Piece(), e.Piece(), static_cast(args).Piece()...} + ); + } + + // Helper function for the future StrCat default floating-point format, %.6g + // This is fast. + inline strings_internal::AlphaNumBuffer< + numbers_internal::kSixDigitsToBufferSize> + SixDigits(double d) + { + strings_internal::AlphaNumBuffer + result; + result.size = numbers_internal::SixDigitsToBuffer(d, &result.data[0]); + return result; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_STR_CAT_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/str_format.h b/CAPI/cpp/grpc/include/absl/strings/str_format.h new file mode 100644 index 00000000..0893b37c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/str_format.h @@ -0,0 +1,945 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: str_format.h +// ----------------------------------------------------------------------------- +// +// The `str_format` library is a typesafe replacement for the family of +// `printf()` string formatting routines within the `` standard library +// header. Like the `printf` family, `str_format` uses a "format string" to +// perform argument substitutions based on types. See the `FormatSpec` section +// below for format string documentation. +// +// Example: +// +// std::string s = absl::StrFormat( +// "%s %s You have $%d!", "Hello", name, dollars); +// +// The library consists of the following basic utilities: +// +// * `absl::StrFormat()`, a type-safe replacement for `std::sprintf()`, to +// write a format string to a `string` value. +// * `absl::StrAppendFormat()` to append a format string to a `string` +// * `absl::StreamFormat()` to more efficiently write a format string to a +// stream, such as`std::cout`. +// * `absl::PrintF()`, `absl::FPrintF()` and `absl::SNPrintF()` as +// drop-in replacements for `std::printf()`, `std::fprintf()` and +// `std::snprintf()`. +// +// Note: An `absl::SPrintF()` drop-in replacement is not supported as it +// is generally unsafe due to buffer overflows. Use `absl::StrFormat` which +// returns the string as output instead of expecting a pre-allocated buffer. 
+// +// Additionally, you can provide a format string (and its associated arguments) +// using one of the following abstractions: +// +// * A `FormatSpec` class template fully encapsulates a format string and its +// type arguments and is usually provided to `str_format` functions as a +// variadic argument of type `FormatSpec`. The `FormatSpec` +// template is evaluated at compile-time, providing type safety. +// * A `ParsedFormat` instance, which encapsulates a specific, pre-compiled +// format string for a specific set of type(s), and which can be passed +// between API boundaries. (The `FormatSpec` type should not be used +// directly except as an argument type for wrapper functions.) +// +// The `str_format` library provides the ability to output its format strings to +// arbitrary sink types: +// +// * A generic `Format()` function to write outputs to arbitrary sink types, +// which must implement a `FormatRawSink` interface. +// +// * A `FormatUntyped()` function that is similar to `Format()` except it is +// loosely typed. `FormatUntyped()` is not a template and does not perform +// any compile-time checking of the format string; instead, it returns a +// boolean from a runtime check. +// +// In addition, the `str_format` library provides extension points for +// augmenting formatting to new types. See "StrFormat Extensions" below. 
+ +#ifndef ABSL_STRINGS_STR_FORMAT_H_ +#define ABSL_STRINGS_STR_FORMAT_H_ + +#include +#include + +#include "absl/strings/internal/str_format/arg.h" // IWYU pragma: export +#include "absl/strings/internal/str_format/bind.h" // IWYU pragma: export +#include "absl/strings/internal/str_format/checker.h" // IWYU pragma: export +#include "absl/strings/internal/str_format/extension.h" // IWYU pragma: export +#include "absl/strings/internal/str_format/parser.h" // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // UntypedFormatSpec + // + // A type-erased class that can be used directly within untyped API entry + // points. An `UntypedFormatSpec` is specifically used as an argument to + // `FormatUntyped()`. + // + // Example: + // + // absl::UntypedFormatSpec format("%d"); + // std::string out; + // CHECK(absl::FormatUntyped(&out, format, {absl::FormatArg(1)})); + class UntypedFormatSpec + { + public: + UntypedFormatSpec() = delete; + UntypedFormatSpec(const UntypedFormatSpec&) = delete; + UntypedFormatSpec& operator=(const UntypedFormatSpec&) = delete; + + explicit UntypedFormatSpec(string_view s) : + spec_(s) + { + } + + protected: + explicit UntypedFormatSpec(const str_format_internal::ParsedFormatBase* pc) : + spec_(pc) + { + } + + private: + friend str_format_internal::UntypedFormatSpecImpl; + str_format_internal::UntypedFormatSpecImpl spec_; + }; + + // FormatStreamed() + // + // Takes a streamable argument and returns an object that can print it + // with '%s'. Allows printing of types that have an `operator<<` but no + // intrinsic type support within `StrFormat()` itself. 
+ // + // Example: + // + // absl::StrFormat("%s", absl::FormatStreamed(obj)); + template + str_format_internal::StreamedWrapper FormatStreamed(const T& v) + { + return str_format_internal::StreamedWrapper(v); + } + + // FormatCountCapture + // + // This class provides a way to safely wrap `StrFormat()` captures of `%n` + // conversions, which denote the number of characters written by a formatting + // operation to this point, into an integer value. + // + // This wrapper is designed to allow safe usage of `%n` within `StrFormat(); in + // the `printf()` family of functions, `%n` is not safe to use, as the `int *` + // buffer can be used to capture arbitrary data. + // + // Example: + // + // int n = 0; + // std::string s = absl::StrFormat("%s%d%n", "hello", 123, + // absl::FormatCountCapture(&n)); + // EXPECT_EQ(8, n); + class FormatCountCapture + { + public: + explicit FormatCountCapture(int* p) : + p_(p) + { + } + + private: + // FormatCountCaptureHelper is used to define FormatConvertImpl() for this + // class. + friend struct str_format_internal::FormatCountCaptureHelper; + // Unused() is here because of the false positive from -Wunused-private-field + // p_ is used in the templated function of the friend FormatCountCaptureHelper + // class. + int* Unused() + { + return p_; + } + int* p_; + }; + + // FormatSpec + // + // The `FormatSpec` type defines the makeup of a format string within the + // `str_format` library. It is a variadic class template that is evaluated at + // compile-time, according to the format string and arguments that are passed to + // it. + // + // You should not need to manipulate this type directly. You should only name it + // if you are writing wrapper functions which accept format arguments that will + // be provided unmodified to functions in this library. Such a wrapper function + // might be a class method that provides format arguments and/or internally uses + // the result of formatting. 
+ // + // For a `FormatSpec` to be valid at compile-time, it must be provided as + // either: + // + // * A `constexpr` literal or `absl::string_view`, which is how it most often + // used. + // * A `ParsedFormat` instantiation, which ensures the format string is + // valid before use. (See below.) + // + // Example: + // + // // Provided as a string literal. + // absl::StrFormat("Welcome to %s, Number %d!", "The Village", 6); + // + // // Provided as a constexpr absl::string_view. + // constexpr absl::string_view formatString = "Welcome to %s, Number %d!"; + // absl::StrFormat(formatString, "The Village", 6); + // + // // Provided as a pre-compiled ParsedFormat object. + // // Note that this example is useful only for illustration purposes. + // absl::ParsedFormat<'s', 'd'> formatString("Welcome to %s, Number %d!"); + // absl::StrFormat(formatString, "TheVillage", 6); + // + // A format string generally follows the POSIX syntax as used within the POSIX + // `printf` specification. (Exceptions are noted below.) + // + // (See http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html) + // + // In specific, the `FormatSpec` supports the following type specifiers: + // * `c` for characters + // * `s` for strings + // * `d` or `i` for integers + // * `o` for unsigned integer conversions into octal + // * `x` or `X` for unsigned integer conversions into hex + // * `u` for unsigned integers + // * `f` or `F` for floating point values into decimal notation + // * `e` or `E` for floating point values into exponential notation + // * `a` or `A` for floating point values into hex exponential notation + // * `g` or `G` for floating point values into decimal or exponential + // notation based on their precision + // * `p` for pointer address values + // * `n` for the special case of writing out the number of characters + // written to this point. The resulting value must be captured within an + // `absl::FormatCountCapture` type. 
+ // * `v` for values using the default format for a deduced type. These deduced + // types include many of the primitive types denoted here as well as + // user-defined types containing the proper extensions. (See below for more + // information.) + // + // Implementation-defined behavior: + // * A null pointer provided to "%s" or "%p" is output as "(nil)". + // * A non-null pointer provided to "%p" is output in hex as if by %#x or + // %#lx. + // + // NOTE: `o`, `x\X` and `u` will convert signed values to their unsigned + // counterpart before formatting. + // + // Examples: + // "%c", 'a' -> "a" + // "%c", 32 -> " " + // "%s", "C" -> "C" + // "%s", std::string("C++") -> "C++" + // "%d", -10 -> "-10" + // "%o", 10 -> "12" + // "%x", 16 -> "10" + // "%f", 123456789 -> "123456789.000000" + // "%e", .01 -> "1.00000e-2" + // "%a", -3.0 -> "-0x1.8p+1" + // "%g", .01 -> "1e-2" + // "%p", (void*)&value -> "0x7ffdeb6ad2a4" + // + // int n = 0; + // std::string s = absl::StrFormat( + // "%s%d%n", "hello", 123, absl::FormatCountCapture(&n)); + // EXPECT_EQ(8, n); + // + // NOTE: the `v` specifier (for "value") is a type specifier not present in the + // POSIX specification. %v will format values according to their deduced type. + // `v` uses `d` for signed integer values, `u` for unsigned integer values, `g` + // for floating point values, and formats boolean values as "true"/"false" + // (instead of 1 or 0 for booleans formatted using d). `const char*` is not + // supported; please use `std:string` and `string_view`. `char` is also not + // supported due to ambiguity of the type. This specifier does not support + // modifiers. 
+ // + // The `FormatSpec` intrinsically supports all of these fundamental C++ types: + // + // * Characters: `char`, `signed char`, `unsigned char` + // * Integers: `int`, `short`, `unsigned short`, `unsigned`, `long`, + // `unsigned long`, `long long`, `unsigned long long` + // * Enums: printed as their underlying integral value + // * Floating-point: `float`, `double`, `long double` + // + // However, in the `str_format` library, a format conversion specifies a broader + // C++ conceptual category instead of an exact type. For example, `%s` binds to + // any string-like argument, so `std::string`, `absl::string_view`, and + // `const char*` are all accepted. Likewise, `%d` accepts any integer-like + // argument, etc. + + template + using FormatSpec = str_format_internal::FormatSpecTemplate< + str_format_internal::ArgumentToConv()...>; + + // ParsedFormat + // + // A `ParsedFormat` is a class template representing a preparsed `FormatSpec`, + // with template arguments specifying the conversion characters used within the + // format string. Such characters must be valid format type specifiers, and + // these type specifiers are checked at compile-time. + // + // Instances of `ParsedFormat` can be created, copied, and reused to speed up + // formatting loops. A `ParsedFormat` may either be constructed statically, or + // dynamically through its `New()` factory function, which only constructs a + // runtime object if the format is valid at that time. + // + // Example: + // + // // Verified at compile time. + // absl::ParsedFormat<'s', 'd'> formatString("Welcome to %s, Number %d!"); + // absl::StrFormat(formatString, "TheVillage", 6); + // + // // Verified at runtime. + // auto format_runtime = absl::ParsedFormat<'d'>::New(format_string); + // if (format_runtime) { + // value = absl::StrFormat(*format_runtime, i); + // } else { + // ... error case ... 
+ // } + +#if defined(__cpp_nontype_template_parameter_auto) + // If C++17 is available, an 'extended' format is also allowed that can specify + // multiple conversion characters per format argument, using a combination of + // `absl::FormatConversionCharSet` enum values (logically a set union) + // via the `|` operator. (Single character-based arguments are still accepted, + // but cannot be combined). Some common conversions also have predefined enum + // values, such as `absl::FormatConversionCharSet::kIntegral`. + // + // Example: + // // Extended format supports multiple conversion characters per argument, + // // specified via a combination of `FormatConversionCharSet` enums. + // using MyFormat = absl::ParsedFormat; + // MyFormat GetFormat(bool use_hex) { + // if (use_hex) return MyFormat("foo %x bar"); + // return MyFormat("foo %d bar"); + // } + // // `format` can be used with any value that supports 'd' and 'x', + // // like `int`. + // auto format = GetFormat(use_hex); + // value = StringF(format, i); + template + using ParsedFormat = absl::str_format_internal::ExtendedParsedFormat< + absl::str_format_internal::ToFormatConversionCharSet(Conv)...>; +#else + template + using ParsedFormat = str_format_internal::ExtendedParsedFormat< + absl::str_format_internal::ToFormatConversionCharSet(Conv)...>; +#endif // defined(__cpp_nontype_template_parameter_auto) + + // StrFormat() + // + // Returns a `string` given a `printf()`-style format string and zero or more + // additional arguments. Use it as you would `sprintf()`. `StrFormat()` is the + // primary formatting function within the `str_format` library, and should be + // used in most cases where you need type-safe conversion of types into + // formatted strings. + // + // The format string generally consists of ordinary character data along with + // one or more format conversion specifiers (denoted by the `%` character). 
+ // Ordinary character data is returned unchanged into the result string, while + // each conversion specification performs a type substitution from + // `StrFormat()`'s other arguments. See the comments for `FormatSpec` for full + // information on the makeup of this format string. + // + // Example: + // + // std::string s = absl::StrFormat( + // "Welcome to %s, Number %d!", "The Village", 6); + // EXPECT_EQ("Welcome to The Village, Number 6!", s); + // + // Returns an empty string in case of error. + template + ABSL_MUST_USE_RESULT std::string StrFormat(const FormatSpec& format, const Args&... args) + { + return str_format_internal::FormatPack( + str_format_internal::UntypedFormatSpecImpl::Extract(format), + {str_format_internal::FormatArgImpl(args)...} + ); + } + + // StrAppendFormat() + // + // Appends to a `dst` string given a format string, and zero or more additional + // arguments, returning `*dst` as a convenience for chaining purposes. Appends + // nothing in case of error (but possibly alters its capacity). + // + // Example: + // + // std::string orig("For example PI is approximately "); + // std::cout << StrAppendFormat(&orig, "%12.6f", 3.14); + template + std::string& StrAppendFormat(std::string* dst, const FormatSpec& format, const Args&... args) + { + return str_format_internal::AppendPack( + dst, str_format_internal::UntypedFormatSpecImpl::Extract(format), {str_format_internal::FormatArgImpl(args)...} + ); + } + + // StreamFormat() + // + // Writes to an output stream given a format string and zero or more arguments, + // generally in a manner that is more efficient than streaming the result of + // `absl:: StrFormat()`. The returned object must be streamed before the full + // expression ends. + // + // Example: + // + // std::cout << StreamFormat("%12.6f", 3.14); + template + ABSL_MUST_USE_RESULT str_format_internal::Streamable StreamFormat( + const FormatSpec& format, const Args&... 
args + ) + { + return str_format_internal::Streamable( + str_format_internal::UntypedFormatSpecImpl::Extract(format), + {str_format_internal::FormatArgImpl(args)...} + ); + } + + // PrintF() + // + // Writes to stdout given a format string and zero or more arguments. This + // function is functionally equivalent to `std::printf()` (and type-safe); + // prefer `absl::PrintF()` over `std::printf()`. + // + // Example: + // + // std::string_view s = "Ulaanbaatar"; + // absl::PrintF("The capital of Mongolia is %s", s); + // + // Outputs: "The capital of Mongolia is Ulaanbaatar" + // + template + int PrintF(const FormatSpec& format, const Args&... args) + { + return str_format_internal::FprintF( + stdout, str_format_internal::UntypedFormatSpecImpl::Extract(format), {str_format_internal::FormatArgImpl(args)...} + ); + } + + // FPrintF() + // + // Writes to a file given a format string and zero or more arguments. This + // function is functionally equivalent to `std::fprintf()` (and type-safe); + // prefer `absl::FPrintF()` over `std::fprintf()`. + // + // Example: + // + // std::string_view s = "Ulaanbaatar"; + // absl::FPrintF(stdout, "The capital of Mongolia is %s", s); + // + // Outputs: "The capital of Mongolia is Ulaanbaatar" + // + template + int FPrintF(std::FILE* output, const FormatSpec& format, const Args&... args) + { + return str_format_internal::FprintF( + output, str_format_internal::UntypedFormatSpecImpl::Extract(format), {str_format_internal::FormatArgImpl(args)...} + ); + } + + // SNPrintF() + // + // Writes to a sized buffer given a format string and zero or more arguments. + // This function is functionally equivalent to `std::snprintf()` (and + // type-safe); prefer `absl::SNPrintF()` over `std::snprintf()`. 
+ // + // In particular, a successful call to `absl::SNPrintF()` writes at most `size` + // bytes of the formatted output to `output`, including a NUL-terminator, and + // returns the number of bytes that would have been written if truncation did + // not occur. In the event of an error, a negative value is returned and `errno` + // is set. + // + // Example: + // + // std::string_view s = "Ulaanbaatar"; + // char output[128]; + // absl::SNPrintF(output, sizeof(output), + // "The capital of Mongolia is %s", s); + // + // Post-condition: output == "The capital of Mongolia is Ulaanbaatar" + // + template + int SNPrintF(char* output, std::size_t size, const FormatSpec& format, const Args&... args) + { + return str_format_internal::SnprintF( + output, size, str_format_internal::UntypedFormatSpecImpl::Extract(format), {str_format_internal::FormatArgImpl(args)...} + ); + } + + // ----------------------------------------------------------------------------- + // Custom Output Formatting Functions + // ----------------------------------------------------------------------------- + + // FormatRawSink + // + // FormatRawSink is a type erased wrapper around arbitrary sink objects + // specifically used as an argument to `Format()`. + // + // All the object has to do define an overload of `AbslFormatFlush()` for the + // sink, usually by adding a ADL-based free function in the same namespace as + // the sink: + // + // void AbslFormatFlush(MySink* dest, absl::string_view part); + // + // where `dest` is the pointer passed to `absl::Format()`. The function should + // append `part` to `dest`. + // + // FormatRawSink does not own the passed sink object. The passed object must + // outlive the FormatRawSink. + class FormatRawSink + { + public: + // Implicitly convert from any type that provides the hook function as + // described above. 
+ template::value>::type> + FormatRawSink(T* raw) // NOLINT + : + sink_(raw) + { + } + + private: + friend str_format_internal::FormatRawSinkImpl; + str_format_internal::FormatRawSinkImpl sink_; + }; + + // Format() + // + // Writes a formatted string to an arbitrary sink object (implementing the + // `absl::FormatRawSink` interface), using a format string and zero or more + // additional arguments. + // + // By default, `std::string`, `std::ostream`, and `absl::Cord` are supported as + // destination objects. If a `std::string` is used the formatted string is + // appended to it. + // + // `absl::Format()` is a generic version of `absl::StrAppendFormat()`, for + // custom sinks. The format string, like format strings for `StrFormat()`, is + // checked at compile-time. + // + // On failure, this function returns `false` and the state of the sink is + // unspecified. + template + bool Format(FormatRawSink raw_sink, const FormatSpec& format, const Args&... args) + { + return str_format_internal::FormatUntyped( + str_format_internal::FormatRawSinkImpl::Extract(raw_sink), + str_format_internal::UntypedFormatSpecImpl::Extract(format), + {str_format_internal::FormatArgImpl(args)...} + ); + } + + // FormatArg + // + // A type-erased handle to a format argument specifically used as an argument to + // `FormatUntyped()`. You may construct `FormatArg` by passing + // reference-to-const of any printable type. `FormatArg` is both copyable and + // assignable. The source data must outlive the `FormatArg` instance. See + // example below. + // + using FormatArg = str_format_internal::FormatArgImpl; + + // FormatUntyped() + // + // Writes a formatted string to an arbitrary sink object (implementing the + // `absl::FormatRawSink` interface), using an `UntypedFormatSpec` and zero or + // more additional arguments. + // + // This function acts as the most generic formatting function in the + // `str_format` library. 
The caller provides a raw sink, an unchecked format + // string, and (usually) a runtime specified list of arguments; no compile-time + // checking of formatting is performed within this function. As a result, a + // caller should check the return value to verify that no error occurred. + // On failure, this function returns `false` and the state of the sink is + // unspecified. + // + // The arguments are provided in an `absl::Span`. + // Each `absl::FormatArg` object binds to a single argument and keeps a + // reference to it. The values used to create the `FormatArg` objects must + // outlive this function call. + // + // Example: + // + // std::optional FormatDynamic( + // const std::string& in_format, + // const vector& in_args) { + // std::string out; + // std::vector args; + // for (const auto& v : in_args) { + // // It is important that 'v' is a reference to the objects in in_args. + // // The values we pass to FormatArg must outlive the call to + // // FormatUntyped. + // args.emplace_back(v); + // } + // absl::UntypedFormatSpec format(in_format); + // if (!absl::FormatUntyped(&out, format, args)) { + // return std::nullopt; + // } + // return std::move(out); + // } + // + ABSL_MUST_USE_RESULT inline bool FormatUntyped( + FormatRawSink raw_sink, const UntypedFormatSpec& format, absl::Span args + ) + { + return str_format_internal::FormatUntyped( + str_format_internal::FormatRawSinkImpl::Extract(raw_sink), + str_format_internal::UntypedFormatSpecImpl::Extract(format), + args + ); + } + + //------------------------------------------------------------------------------ + // StrFormat Extensions + //------------------------------------------------------------------------------ + // + // AbslStringify() + // + // A simpler customization API for formatting user-defined types using + // absl::StrFormat(). 
The API relies on detecting an overload in the + // user-defined type's namespace of a free (non-member) `AbslStringify()` + // function as a friend definition with the following signature: + // + // template + // void AbslStringify(Sink& sink, const X& value); + // + // An `AbslStringify()` overload for a type should only be declared in the same + // file and namespace as said type. + // + // Note that unlike with AbslFormatConvert(), AbslStringify() does not allow + // customization of allowed conversion characters. AbslStringify() uses `%v` as + // the underlying conversion specififer. Additionally, AbslStringify() supports + // use with absl::StrCat while AbslFormatConvert() does not. + // + // Example: + // + // struct Point { + // // To add formatting support to `Point`, we simply need to add a free + // // (non-member) function `AbslStringify()`. This method prints in the + // // request format using the underlying `%v` specifier. You can add such a + // // free function using a friend declaration within the body of the class. + // // The sink parameter is a templated type to avoid requiring dependencies. + // template + // friend void AbslStringify(Sink& sink, const Point& p) { + // absl::Format(&sink, "(%v, %v)", p.x, p.y); + // } + // + // int x; + // int y; + // }; + // + // AbslFormatConvert() + // + // The StrFormat library provides a customization API for formatting + // user-defined types using absl::StrFormat(). The API relies on detecting an + // overload in the user-defined type's namespace of a free (non-member) + // `AbslFormatConvert()` function, usually as a friend definition with the + // following signature: + // + // absl::FormatConvertResult<...> AbslFormatConvert( + // const X& value, + // const absl::FormatConversionSpec& spec, + // absl::FormatSink *sink); + // + // An `AbslFormatConvert()` overload for a type should only be declared in the + // same file and namespace as said type. 
+ // + // The abstractions within this definition include: + // + // * An `absl::FormatConversionSpec` to specify the fields to pull from a + // user-defined type's format string + // * An `absl::FormatSink` to hold the converted string data during the + // conversion process. + // * An `absl::FormatConvertResult` to hold the status of the returned + // formatting operation + // + // The return type encodes all the conversion characters that your + // AbslFormatConvert() routine accepts. The return value should be {true}. + // A return value of {false} will result in `StrFormat()` returning + // an empty string. This result will be propagated to the result of + // `FormatUntyped`. + // + // Example: + // + // struct Point { + // // To add formatting support to `Point`, we simply need to add a free + // // (non-member) function `AbslFormatConvert()`. This method interprets + // // `spec` to print in the request format. The allowed conversion characters + // // can be restricted via the type of the result, in this example + // // string and integral formatting are allowed (but not, for instance + // // floating point characters like "%f"). You can add such a free function + // // using a friend declaration within the body of the class: + // friend absl::FormatConvertResult + // AbslFormatConvert(const Point& p, const absl::FormatConversionSpec& spec, + // absl::FormatSink* s) { + // if (spec.conversion_char() == absl::FormatConversionChar::s) { + // absl::Format(s, "x=%vy=%v", p.x, p.y); + // } else { + // absl::Format(s, "%v,%v", p.x, p.y); + // } + // return {true}; + // } + // + // int x; + // int y; + // }; + + // clang-format off + +// FormatConversionChar +// +// Specifies the formatting character provided in the format string +// passed to `StrFormat()`. 
+enum class FormatConversionChar : uint8_t { + c, s, // text + d, i, o, u, x, X, // int + f, F, e, E, g, G, a, A, // float + n, p, v // misc +}; + // clang-format on + + // FormatConversionSpec + // + // Specifies modifications to the conversion of the format string, through use + // of one or more format flags in the source format string. + class FormatConversionSpec + { + public: + // FormatConversionSpec::is_basic() + // + // Indicates that width and precision are not specified, and no additional + // flags are set for this conversion character in the format string. + bool is_basic() const + { + return impl_.is_basic(); + } + + // FormatConversionSpec::has_left_flag() + // + // Indicates whether the result should be left justified for this conversion + // character in the format string. This flag is set through use of a '-' + // character in the format string. E.g. "%-s" + bool has_left_flag() const + { + return impl_.has_left_flag(); + } + + // FormatConversionSpec::has_show_pos_flag() + // + // Indicates whether a sign column is prepended to the result for this + // conversion character in the format string, even if the result is positive. + // This flag is set through use of a '+' character in the format string. + // E.g. "%+d" + bool has_show_pos_flag() const + { + return impl_.has_show_pos_flag(); + } + + // FormatConversionSpec::has_sign_col_flag() + // + // Indicates whether a mandatory sign column is added to the result for this + // conversion character. This flag is set through use of a space character + // (' ') in the format string. E.g. "% i" + bool has_sign_col_flag() const + { + return impl_.has_sign_col_flag(); + } + + // FormatConversionSpec::has_alt_flag() + // + // Indicates whether an "alternate" format is applied to the result for this + // conversion character. Alternative forms depend on the type of conversion + // character, and unallowed alternatives are undefined. 
This flag is set + // through use of a '#' character in the format string. E.g. "%#h" + bool has_alt_flag() const + { + return impl_.has_alt_flag(); + } + + // FormatConversionSpec::has_zero_flag() + // + // Indicates whether zeroes should be prepended to the result for this + // conversion character instead of spaces. This flag is set through use of the + // '0' character in the format string. E.g. "%0f" + bool has_zero_flag() const + { + return impl_.has_zero_flag(); + } + + // FormatConversionSpec::conversion_char() + // + // Returns the underlying conversion character. + FormatConversionChar conversion_char() const + { + return impl_.conversion_char(); + } + + // FormatConversionSpec::width() + // + // Returns the specified width (indicated through use of a non-zero integer + // value or '*' character) of the conversion character. If width is + // unspecified, it returns a negative value. + int width() const + { + return impl_.width(); + } + + // FormatConversionSpec::precision() + // + // Returns the specified precision (through use of the '.' character followed + // by a non-zero integer value or '*' character) of the conversion character. + // If precision is unspecified, it returns a negative value. + int precision() const + { + return impl_.precision(); + } + + private: + explicit FormatConversionSpec( + str_format_internal::FormatConversionSpecImpl impl + ) : + impl_(impl) + { + } + + friend str_format_internal::FormatConversionSpecImpl; + + absl::str_format_internal::FormatConversionSpecImpl impl_; + }; + + // Type safe OR operator for FormatConversionCharSet to allow accepting multiple + // conversion chars in custom format converters. 
+ constexpr FormatConversionCharSet operator|(FormatConversionCharSet a, FormatConversionCharSet b) + { + return static_cast(static_cast(a) | static_cast(b)); + } + + // FormatConversionCharSet + // + // Specifies the _accepted_ conversion types as a template parameter to + // FormatConvertResult for custom implementations of `AbslFormatConvert`. + // Note the helper predefined alias definitions (kIntegral, etc.) below. + enum class FormatConversionCharSet : uint64_t + { + // text + c = str_format_internal::FormatConversionCharToConvInt('c'), + s = str_format_internal::FormatConversionCharToConvInt('s'), + // integer + d = str_format_internal::FormatConversionCharToConvInt('d'), + i = str_format_internal::FormatConversionCharToConvInt('i'), + o = str_format_internal::FormatConversionCharToConvInt('o'), + u = str_format_internal::FormatConversionCharToConvInt('u'), + x = str_format_internal::FormatConversionCharToConvInt('x'), + X = str_format_internal::FormatConversionCharToConvInt('X'), + // Float + f = str_format_internal::FormatConversionCharToConvInt('f'), + F = str_format_internal::FormatConversionCharToConvInt('F'), + e = str_format_internal::FormatConversionCharToConvInt('e'), + E = str_format_internal::FormatConversionCharToConvInt('E'), + g = str_format_internal::FormatConversionCharToConvInt('g'), + G = str_format_internal::FormatConversionCharToConvInt('G'), + a = str_format_internal::FormatConversionCharToConvInt('a'), + A = str_format_internal::FormatConversionCharToConvInt('A'), + // misc + n = str_format_internal::FormatConversionCharToConvInt('n'), + p = str_format_internal::FormatConversionCharToConvInt('p'), + v = str_format_internal::FormatConversionCharToConvInt('v'), + + // Used for width/precision '*' specification. 
+ kStar = static_cast( + absl::str_format_internal::FormatConversionCharSetInternal::kStar + ), + // Some predefined values: + kIntegral = d | i | u | o | x | X, + kFloating = a | e | f | g | A | E | F | G, + kNumeric = kIntegral | kFloating, + kString = s, + kPointer = p, + }; + + // FormatSink + // + // A format sink is a generic abstraction to which conversions may write their + // formatted string data. `absl::FormatConvert()` uses this sink to write its + // formatted string. + // + class FormatSink + { + public: + // FormatSink::Append() + // + // Appends `count` copies of `ch` to the format sink. + void Append(size_t count, char ch) + { + sink_->Append(count, ch); + } + + // Overload of FormatSink::Append() for appending the characters of a string + // view to a format sink. + void Append(string_view v) + { + sink_->Append(v); + } + + // FormatSink::PutPaddedString() + // + // Appends `precision` number of bytes of `v` to the format sink. If this is + // less than `width`, spaces will be appended first (if `left` is false), or + // after (if `left` is true) to ensure the total amount appended is + // at least `width`. + bool PutPaddedString(string_view v, int width, int precision, bool left) + { + return sink_->PutPaddedString(v, width, precision, left); + } + + // Support `absl::Format(&sink, format, args...)`. + friend void AbslFormatFlush(FormatSink* sink, absl::string_view v) + { + sink->Append(v); + } + + private: + friend str_format_internal::FormatSinkImpl; + explicit FormatSink(str_format_internal::FormatSinkImpl* s) : + sink_(s) + { + } + str_format_internal::FormatSinkImpl* sink_; + }; + + // FormatConvertResult + // + // Indicates whether a call to AbslFormatConvert() was successful. + // This return type informs the StrFormat extension framework (through + // ADL but using the return type) of what conversion characters are supported. + // It is strongly discouraged to return {false}, as this will result in an + // empty string in StrFormat. 
+ template + struct FormatConvertResult + { + bool value; + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_STR_FORMAT_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/str_join.h b/CAPI/cpp/grpc/include/absl/strings/str_join.h new file mode 100644 index 00000000..12dcf056 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/str_join.h @@ -0,0 +1,299 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: str_join.h +// ----------------------------------------------------------------------------- +// +// This header file contains functions for joining a range of elements and +// returning the result as a std::string. StrJoin operations are specified by +// passing a range, a separator string to use between the elements joined, and +// an optional Formatter responsible for converting each argument in the range +// to a string. If omitted, a default `AlphaNumFormatter()` is called on the +// elements to be joined, using the same formatting that `absl::StrCat()` uses. +// This package defines a number of default formatters, and you can define your +// own implementations. 
+// +// Ranges are specified by passing a container with `std::begin()` and +// `std::end()` iterators, container-specific `begin()` and `end()` iterators, a +// brace-initialized `std::initializer_list`, or a `std::tuple` of heterogeneous +// objects. The separator string is specified as an `absl::string_view`. +// +// Because the default formatter uses the `absl::AlphaNum` class, +// `absl::StrJoin()`, like `absl::StrCat()`, will work out-of-the-box on +// collections of strings, ints, floats, doubles, etc. +// +// Example: +// +// std::vector v = {"foo", "bar", "baz"}; +// std::string s = absl::StrJoin(v, "-"); +// EXPECT_EQ("foo-bar-baz", s); +// +// See comments on the `absl::StrJoin()` function for more examples. + +#ifndef ABSL_STRINGS_STR_JOIN_H_ +#define ABSL_STRINGS_STR_JOIN_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/strings/internal/str_join_internal.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // Concept: Formatter + // ----------------------------------------------------------------------------- + // + // A Formatter is a function object that is responsible for formatting its + // argument as a string and appending it to a given output std::string. + // Formatters may be implemented as function objects, lambdas, or normal + // functions. You may provide your own Formatter to enable `absl::StrJoin()` to + // work with arbitrary types. + // + // The following is an example of a custom Formatter that uses + // `absl::FormatDuration` to join a list of `absl::Duration`s. 
+ // + // std::vector v = {absl::Seconds(1), absl::Milliseconds(10)}; + // std::string s = + // absl::StrJoin(v, ", ", [](std::string* out, absl::Duration dur) { + // absl::StrAppend(out, absl::FormatDuration(dur)); + // }); + // EXPECT_EQ("1s, 10ms", s); + // + // The following standard formatters are provided within this file: + // + // - `AlphaNumFormatter()` (the default) + // - `StreamFormatter()` + // - `PairFormatter()` + // - `DereferenceFormatter()` + + // AlphaNumFormatter() + // + // Default formatter used if none is specified. Uses `absl::AlphaNum` to convert + // numeric arguments to strings. + inline strings_internal::AlphaNumFormatterImpl AlphaNumFormatter() + { + return strings_internal::AlphaNumFormatterImpl(); + } + + // StreamFormatter() + // + // Formats its argument using the << operator. + inline strings_internal::StreamFormatterImpl StreamFormatter() + { + return strings_internal::StreamFormatterImpl(); + } + + // Function Template: PairFormatter(Formatter, absl::string_view, Formatter) + // + // Formats a `std::pair` by putting a given separator between the pair's + // `.first` and `.second` members. This formatter allows you to specify + // custom Formatters for both the first and second member of each pair. + template + inline strings_internal::PairFormatterImpl + PairFormatter(FirstFormatter f1, absl::string_view sep, SecondFormatter f2) + { + return strings_internal::PairFormatterImpl( + std::move(f1), sep, std::move(f2) + ); + } + + // Function overload of PairFormatter() for using a default + // `AlphaNumFormatter()` for each Formatter in the pair. 
+ inline strings_internal::PairFormatterImpl< + strings_internal::AlphaNumFormatterImpl, + strings_internal::AlphaNumFormatterImpl> + PairFormatter(absl::string_view sep) + { + return PairFormatter(AlphaNumFormatter(), sep, AlphaNumFormatter()); + } + + // Function Template: DereferenceFormatter(Formatter) + // + // Formats its argument by dereferencing it and then applying the given + // formatter. This formatter is useful for formatting a container of + // pointer-to-T. This pattern often shows up when joining repeated fields in + // protocol buffers. + template + strings_internal::DereferenceFormatterImpl DereferenceFormatter( + Formatter&& f + ) + { + return strings_internal::DereferenceFormatterImpl( + std::forward(f) + ); + } + + // Function overload of `DereferenceFormatter()` for using a default + // `AlphaNumFormatter()`. + inline strings_internal::DereferenceFormatterImpl< + strings_internal::AlphaNumFormatterImpl> + DereferenceFormatter() + { + return strings_internal::DereferenceFormatterImpl< + strings_internal::AlphaNumFormatterImpl>(AlphaNumFormatter()); + } + + // ----------------------------------------------------------------------------- + // StrJoin() + // ----------------------------------------------------------------------------- + // + // Joins a range of elements and returns the result as a std::string. + // `absl::StrJoin()` takes a range, a separator string to use between the + // elements joined, and an optional Formatter responsible for converting each + // argument in the range to a string. + // + // If omitted, the default `AlphaNumFormatter()` is called on the elements to be + // joined. + // + // Example 1: + // // Joins a collection of strings. This pattern also works with a collection + // // of `absl::string_view` or even `const char*`. 
+ // std::vector v = {"foo", "bar", "baz"}; + // std::string s = absl::StrJoin(v, "-"); + // EXPECT_EQ("foo-bar-baz", s); + // + // Example 2: + // // Joins the values in the given `std::initializer_list<>` specified using + // // brace initialization. This pattern also works with an initializer_list + // // of ints or `absl::string_view` -- any `AlphaNum`-compatible type. + // std::string s = absl::StrJoin({"foo", "bar", "baz"}, "-"); + // EXPECT_EQ("foo-bar-baz", s); + // + // Example 3: + // // Joins a collection of ints. This pattern also works with floats, + // // doubles, int64s -- any `StrCat()`-compatible type. + // std::vector v = {1, 2, 3, -4}; + // std::string s = absl::StrJoin(v, "-"); + // EXPECT_EQ("1-2-3--4", s); + // + // Example 4: + // // Joins a collection of pointer-to-int. By default, pointers are + // // dereferenced and the pointee is formatted using the default format for + // // that type; such dereferencing occurs for all levels of indirection, so + // // this pattern works just as well for `std::vector` as for + // // `std::vector`. + // int x = 1, y = 2, z = 3; + // std::vector v = {&x, &y, &z}; + // std::string s = absl::StrJoin(v, "-"); + // EXPECT_EQ("1-2-3", s); + // + // Example 5: + // // Dereferencing of `std::unique_ptr<>` is also supported: + // std::vector> v + // v.emplace_back(new int(1)); + // v.emplace_back(new int(2)); + // v.emplace_back(new int(3)); + // std::string s = absl::StrJoin(v, "-"); + // EXPECT_EQ("1-2-3", s); + // + // Example 6: + // // Joins a `std::map`, with each key-value pair separated by an equals + // // sign. This pattern would also work with, say, a + // // `std::vector>`. 
+ // std::map m = { + // std::make_pair("a", 1), + // std::make_pair("b", 2), + // std::make_pair("c", 3)}; + // std::string s = absl::StrJoin(m, ",", absl::PairFormatter("=")); + // EXPECT_EQ("a=1,b=2,c=3", s); + // + // Example 7: + // // These examples show how `absl::StrJoin()` handles a few common edge + // // cases: + // std::vector v_empty; + // EXPECT_EQ("", absl::StrJoin(v_empty, "-")); + // + // std::vector v_one_item = {"foo"}; + // EXPECT_EQ("foo", absl::StrJoin(v_one_item, "-")); + // + // std::vector v_empty_string = {""}; + // EXPECT_EQ("", absl::StrJoin(v_empty_string, "-")); + // + // std::vector v_one_item_empty_string = {"a", ""}; + // EXPECT_EQ("a-", absl::StrJoin(v_one_item_empty_string, "-")); + // + // std::vector v_two_empty_string = {"", ""}; + // EXPECT_EQ("-", absl::StrJoin(v_two_empty_string, "-")); + // + // Example 8: + // // Joins a `std::tuple` of heterogeneous types, converting each to + // // a std::string using the `absl::AlphaNum` class. + // std::string s = absl::StrJoin(std::make_tuple(123, "abc", 0.456), "-"); + // EXPECT_EQ("123-abc-0.456", s); + + template + std::string StrJoin(Iterator start, Iterator end, absl::string_view sep, Formatter&& fmt) + { + return strings_internal::JoinAlgorithm(start, end, sep, fmt); + } + + template + std::string StrJoin(const Range& range, absl::string_view separator, Formatter&& fmt) + { + return strings_internal::JoinRange(range, separator, fmt); + } + + template + std::string StrJoin(std::initializer_list il, absl::string_view separator, Formatter&& fmt) + { + return strings_internal::JoinRange(il, separator, fmt); + } + + template + std::string StrJoin(const std::tuple& value, absl::string_view separator, Formatter&& fmt) + { + return strings_internal::JoinAlgorithm(value, separator, fmt); + } + + template + std::string StrJoin(Iterator start, Iterator end, absl::string_view separator) + { + return strings_internal::JoinRange(start, end, separator); + } + + template + std::string 
StrJoin(const Range& range, absl::string_view separator) + { + return strings_internal::JoinRange(range, separator); + } + + template + std::string StrJoin(std::initializer_list il, absl::string_view separator) + { + return strings_internal::JoinRange(il, separator); + } + + template + std::string StrJoin(const std::tuple& value, absl::string_view separator) + { + return strings_internal::JoinAlgorithm(value, separator, AlphaNumFormatter()); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_STR_JOIN_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/str_replace.h b/CAPI/cpp/grpc/include/absl/strings/str_replace.h new file mode 100644 index 00000000..a550908e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/str_replace.h @@ -0,0 +1,234 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: str_replace.h +// ----------------------------------------------------------------------------- +// +// This file defines `absl::StrReplaceAll()`, a general-purpose string +// replacement function designed for large, arbitrary text substitutions, +// especially on strings which you are receiving from some other system for +// further processing (e.g. processing regular expressions, escaping HTML +// entities, etc.). 
`StrReplaceAll` is designed to be efficient even when only +// one substitution is being performed, or when substitution is rare. +// +// If the string being modified is known at compile-time, and the substitutions +// vary, `absl::Substitute()` may be a better choice. +// +// Example: +// +// std::string html_escaped = absl::StrReplaceAll(user_input, { +// {"&", "&"}, +// {"<", "<"}, +// {">", ">"}, +// {"\"", """}, +// {"'", "'"}}); +#ifndef ABSL_STRINGS_STR_REPLACE_H_ +#define ABSL_STRINGS_STR_REPLACE_H_ + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // StrReplaceAll() + // + // Replaces character sequences within a given string with replacements provided + // within an initializer list of key/value pairs. Candidate replacements are + // considered in order as they occur within the string, with earlier matches + // taking precedence, and longer matches taking precedence for candidates + // starting at the same position in the string. Once a substitution is made, the + // replaced text is not considered for any further substitutions. + // + // Example: + // + // std::string s = absl::StrReplaceAll( + // "$who bought $count #Noun. Thanks $who!", + // {{"$count", absl::StrCat(5)}, + // {"$who", "Bob"}, + // {"#Noun", "Apples"}}); + // EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s); + ABSL_MUST_USE_RESULT std::string StrReplaceAll( + absl::string_view s, + std::initializer_list> + replacements + ); + + // Overload of `StrReplaceAll()` to accept a container of key/value replacement + // pairs (typically either an associative map or a `std::vector` of `std::pair` + // elements). A vector of pairs is generally more efficient. + // + // Examples: + // + // std::map replacements; + // replacements["$who"] = "Bob"; + // replacements["$count"] = "5"; + // replacements["#Noun"] = "Apples"; + // std::string s = absl::StrReplaceAll( + // "$who bought $count #Noun. 
Thanks $who!", + // replacements); + // EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s); + // + // // A std::vector of std::pair elements can be more efficient. + // std::vector> replacements; + // replacements.push_back({"&", "&"}); + // replacements.push_back({"<", "<"}); + // replacements.push_back({">", ">"}); + // std::string s = absl::StrReplaceAll("if (ptr < &foo)", + // replacements); + // EXPECT_EQ("if (ptr < &foo)", s); + template + std::string StrReplaceAll(absl::string_view s, const StrToStrMapping& replacements); + + // Overload of `StrReplaceAll()` to replace character sequences within a given + // output string *in place* with replacements provided within an initializer + // list of key/value pairs, returning the number of substitutions that occurred. + // + // Example: + // + // std::string s = std::string("$who bought $count #Noun. Thanks $who!"); + // int count; + // count = absl::StrReplaceAll({{"$count", absl::StrCat(5)}, + // {"$who", "Bob"}, + // {"#Noun", "Apples"}}, &s); + // EXPECT_EQ(count, 4); + // EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s); + int StrReplaceAll( + std::initializer_list> + replacements, + std::string* target + ); + + // Overload of `StrReplaceAll()` to replace patterns within a given output + // string *in place* with replacements provided within a container of key/value + // pairs. + // + // Example: + // + // std::string s = std::string("if (ptr < &foo)"); + // int count = absl::StrReplaceAll({{"&", "&"}, + // {"<", "<"}, + // {">", ">"}}, &s); + // EXPECT_EQ(count, 2); + // EXPECT_EQ("if (ptr < &foo)", s); + template + int StrReplaceAll(const StrToStrMapping& replacements, std::string* target); + + // Implementation details only, past this point. 
+ namespace strings_internal + { + + struct ViableSubstitution + { + absl::string_view old; + absl::string_view replacement; + size_t offset; + + ViableSubstitution(absl::string_view old_str, absl::string_view replacement_str, size_t offset_val) : + old(old_str), + replacement(replacement_str), + offset(offset_val) + { + } + + // One substitution occurs "before" another (takes priority) if either + // it has the lowest offset, or it has the same offset but a larger size. + bool OccursBefore(const ViableSubstitution& y) const + { + if (offset != y.offset) + return offset < y.offset; + return old.size() > y.old.size(); + } + }; + + // Build a vector of ViableSubstitutions based on the given list of + // replacements. subs can be implemented as a priority_queue. However, it turns + // out that most callers have small enough a list of substitutions that the + // overhead of such a queue isn't worth it. + template + std::vector FindSubstitutions( + absl::string_view s, const StrToStrMapping& replacements + ) + { + std::vector subs; + subs.reserve(replacements.size()); + + for (const auto& rep : replacements) + { + using std::get; + absl::string_view old(get<0>(rep)); + + size_t pos = s.find(old); + if (pos == s.npos) + continue; + + // Ignore attempts to replace "". This condition is almost never true, + // but above condition is frequently true. That's why we test for this + // now and not before. + if (old.empty()) + continue; + + subs.emplace_back(old, get<1>(rep), pos); + + // Insertion sort to ensure the last ViableSubstitution comes before + // all the others. 
+ size_t index = subs.size(); + while (--index && subs[index - 1].OccursBefore(subs[index])) + { + std::swap(subs[index], subs[index - 1]); + } + } + return subs; + } + + int ApplySubstitutions(absl::string_view s, std::vector* subs_ptr, std::string* result_ptr); + + } // namespace strings_internal + + template + std::string StrReplaceAll(absl::string_view s, const StrToStrMapping& replacements) + { + auto subs = strings_internal::FindSubstitutions(s, replacements); + std::string result; + result.reserve(s.size()); + strings_internal::ApplySubstitutions(s, &subs, &result); + return result; + } + + template + int StrReplaceAll(const StrToStrMapping& replacements, std::string* target) + { + auto subs = strings_internal::FindSubstitutions(*target, replacements); + if (subs.empty()) + return 0; + + std::string result; + result.reserve(target->size()); + int substitutions = + strings_internal::ApplySubstitutions(*target, &subs, &result); + target->swap(result); + return substitutions; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_STR_REPLACE_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/str_split.h b/CAPI/cpp/grpc/include/absl/strings/str_split.h new file mode 100644 index 00000000..0bf76218 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/str_split.h @@ -0,0 +1,586 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: str_split.h +// ----------------------------------------------------------------------------- +// +// This file contains functions for splitting strings. It defines the main +// `StrSplit()` function, several delimiters for determining the boundaries on +// which to split the string, and predicates for filtering delimited results. +// `StrSplit()` adapts the returned collection to the type specified by the +// caller. +// +// Example: +// +// // Splits the given string on commas. Returns the results in a +// // vector of strings. +// std::vector v = absl::StrSplit("a,b,c", ','); +// // Can also use "," +// // v[0] == "a", v[1] == "b", v[2] == "c" +// +// See StrSplit() below for more information. +#ifndef ABSL_STRINGS_STR_SPLIT_H_ +#define ABSL_STRINGS_STR_SPLIT_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" +#include "absl/strings/internal/str_split_internal.h" +#include "absl/strings/string_view.h" +#include "absl/strings/strip.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + //------------------------------------------------------------------------------ + // Delimiters + //------------------------------------------------------------------------------ + // + // `StrSplit()` uses delimiters to define the boundaries between elements in the + // provided input. Several `Delimiter` types are defined below. If a string + // (`const char*`, `std::string`, or `absl::string_view`) is passed in place of + // an explicit `Delimiter` object, `StrSplit()` treats it the same way as if it + // were passed a `ByString` delimiter. + // + // A `Delimiter` is an object with a `Find()` function that knows how to find + // the first occurrence of itself in a given `absl::string_view`. 
+ // + // The following `Delimiter` types are available for use within `StrSplit()`: + // + // - `ByString` (default for string arguments) + // - `ByChar` (default for a char argument) + // - `ByAnyChar` + // - `ByLength` + // - `MaxSplits` + // + // A Delimiter's `Find()` member function will be passed an input `text` that is + // to be split and a position (`pos`) to begin searching for the next delimiter + // in `text`. The returned absl::string_view should refer to the next occurrence + // (after `pos`) of the represented delimiter; this returned absl::string_view + // represents the next location where the input `text` should be broken. + // + // The returned absl::string_view may be zero-length if the Delimiter does not + // represent a part of the string (e.g., a fixed-length delimiter). If no + // delimiter is found in the input `text`, a zero-length absl::string_view + // referring to `text.end()` should be returned (e.g., + // `text.substr(text.size())`). It is important that the returned + // absl::string_view always be within the bounds of the input `text` given as an + // argument--it must not refer to a string that is physically located outside of + // the given string. + // + // The following example is a simple Delimiter object that is created with a + // single char and will look for that char in the text passed to the `Find()` + // function: + // + // struct SimpleDelimiter { + // const char c_; + // explicit SimpleDelimiter(char c) : c_(c) {} + // absl::string_view Find(absl::string_view text, size_t pos) { + // auto found = text.find(c_, pos); + // if (found == absl::string_view::npos) + // return text.substr(text.size()); + // + // return text.substr(found, 1); + // } + // }; + + // ByString + // + // A sub-string delimiter. If `StrSplit()` is passed a string in place of a + // `Delimiter` object, the string will be implicitly converted into a + // `ByString` delimiter. 
+ // + // Example: + // + // // Because a string literal is converted to an `absl::ByString`, + // // the following two splits are equivalent. + // + // std::vector v1 = absl::StrSplit("a, b, c", ", "); + // + // using absl::ByString; + // std::vector v2 = absl::StrSplit("a, b, c", + // ByString(", ")); + // // v[0] == "a", v[1] == "b", v[2] == "c" + class ByString + { + public: + explicit ByString(absl::string_view sp); + absl::string_view Find(absl::string_view text, size_t pos) const; + + private: + const std::string delimiter_; + }; + + // ByChar + // + // A single character delimiter. `ByChar` is functionally equivalent to a + // 1-char string within a `ByString` delimiter, but slightly more efficient. + // + // Example: + // + // // Because a char literal is converted to a absl::ByChar, + // // the following two splits are equivalent. + // std::vector v1 = absl::StrSplit("a,b,c", ','); + // using absl::ByChar; + // std::vector v2 = absl::StrSplit("a,b,c", ByChar(',')); + // // v[0] == "a", v[1] == "b", v[2] == "c" + // + // `ByChar` is also the default delimiter if a single character is given + // as the delimiter to `StrSplit()`. For example, the following calls are + // equivalent: + // + // std::vector v = absl::StrSplit("a-b", '-'); + // + // using absl::ByChar; + // std::vector v = absl::StrSplit("a-b", ByChar('-')); + // + class ByChar + { + public: + explicit ByChar(char c) : + c_(c) + { + } + absl::string_view Find(absl::string_view text, size_t pos) const; + + private: + char c_; + }; + + // ByAnyChar + // + // A delimiter that will match any of the given byte-sized characters within + // its provided string. + // + // Note: this delimiter works with single-byte string data, but does not work + // with variable-width encodings, such as UTF-8. 
+ // + // Example: + // + // using absl::ByAnyChar; + // std::vector v = absl::StrSplit("a,b=c", ByAnyChar(",=")); + // // v[0] == "a", v[1] == "b", v[2] == "c" + // + // If `ByAnyChar` is given the empty string, it behaves exactly like + // `ByString` and matches each individual character in the input string. + // + class ByAnyChar + { + public: + explicit ByAnyChar(absl::string_view sp); + absl::string_view Find(absl::string_view text, size_t pos) const; + + private: + const std::string delimiters_; + }; + + // ByLength + // + // A delimiter for splitting into equal-length strings. The length argument to + // the constructor must be greater than 0. + // + // Note: this delimiter works with single-byte string data, but does not work + // with variable-width encodings, such as UTF-8. + // + // Example: + // + // using absl::ByLength; + // std::vector v = absl::StrSplit("123456789", ByLength(3)); + + // // v[0] == "123", v[1] == "456", v[2] == "789" + // + // Note that the string does not have to be a multiple of the fixed split + // length. In such a case, the last substring will be shorter. + // + // using absl::ByLength; + // std::vector v = absl::StrSplit("12345", ByLength(2)); + // + // // v[0] == "12", v[1] == "34", v[2] == "5" + class ByLength + { + public: + explicit ByLength(ptrdiff_t length); + absl::string_view Find(absl::string_view text, size_t pos) const; + + private: + const ptrdiff_t length_; + }; + + namespace strings_internal + { + + // A traits-like metafunction for selecting the default Delimiter object type + // for a particular Delimiter type. The base case simply exposes type Delimiter + // itself as the delimiter's Type. However, there are specializations for + // string-like objects that map them to the ByString delimiter object. + // This allows functions like absl::StrSplit() and absl::MaxSplits() to accept + // string-like objects (e.g., ',') as delimiter arguments but they will be + // treated as if a ByString delimiter was given. 
+ template + struct SelectDelimiter + { + using type = Delimiter; + }; + + template<> + struct SelectDelimiter + { + using type = ByChar; + }; + template<> + struct SelectDelimiter + { + using type = ByString; + }; + template<> + struct SelectDelimiter + { + using type = ByString; + }; + template<> + struct SelectDelimiter + { + using type = ByString; + }; + template<> + struct SelectDelimiter + { + using type = ByString; + }; + + // Wraps another delimiter and sets a max number of matches for that delimiter. + template + class MaxSplitsImpl + { + public: + MaxSplitsImpl(Delimiter delimiter, int limit) : + delimiter_(delimiter), + limit_(limit), + count_(0) + { + } + absl::string_view Find(absl::string_view text, size_t pos) + { + if (count_++ == limit_) + { + return absl::string_view(text.data() + text.size(), + 0); // No more matches. + } + return delimiter_.Find(text, pos); + } + + private: + Delimiter delimiter_; + const int limit_; + int count_; + }; + + } // namespace strings_internal + + // MaxSplits() + // + // A delimiter that limits the number of matches which can occur to the passed + // `limit`. The last element in the returned collection will contain all + // remaining unsplit pieces, which may contain instances of the delimiter. + // The collection will contain at most `limit` + 1 elements. 
+ // Example: + // + // using absl::MaxSplits; + // std::vector v = absl::StrSplit("a,b,c", MaxSplits(',', 1)); + // + // // v[0] == "a", v[1] == "b,c" + template + inline strings_internal::MaxSplitsImpl< + typename strings_internal::SelectDelimiter::type> + MaxSplits(Delimiter delimiter, int limit) + { + typedef + typename strings_internal::SelectDelimiter::type DelimiterType; + return strings_internal::MaxSplitsImpl( + DelimiterType(delimiter), limit + ); + } + + //------------------------------------------------------------------------------ + // Predicates + //------------------------------------------------------------------------------ + // + // Predicates filter the results of a `StrSplit()` by determining whether or not + // a resultant element is included in the result set. A predicate may be passed + // as an optional third argument to the `StrSplit()` function. + // + // Predicates are unary functions (or functors) that take a single + // `absl::string_view` argument and return a bool indicating whether the + // argument should be included (`true`) or excluded (`false`). + // + // Predicates are useful when filtering out empty substrings. By default, empty + // substrings may be returned by `StrSplit()`, which is similar to the way split + // functions work in other programming languages. + + // AllowEmpty() + // + // Always returns `true`, indicating that all strings--including empty + // strings--should be included in the split output. This predicate is not + // strictly needed because this is the default behavior of `StrSplit()`; + // however, it might be useful at some call sites to make the intent explicit. 
+ // + // Example: + // + // std::vector v = absl::StrSplit(" a , ,,b,", ',', AllowEmpty()); + // + // // v[0] == " a ", v[1] == " ", v[2] == "", v[3] = "b", v[4] == "" + struct AllowEmpty + { + bool operator()(absl::string_view) const + { + return true; + } + }; + + // SkipEmpty() + // + // Returns `false` if the given `absl::string_view` is empty, indicating that + // `StrSplit()` should omit the empty string. + // + // Example: + // + // std::vector v = absl::StrSplit(",a,,b,", ',', SkipEmpty()); + // + // // v[0] == "a", v[1] == "b" + // + // Note: `SkipEmpty()` does not consider a string containing only whitespace + // to be empty. To skip such whitespace as well, use the `SkipWhitespace()` + // predicate. + struct SkipEmpty + { + bool operator()(absl::string_view sp) const + { + return !sp.empty(); + } + }; + + // SkipWhitespace() + // + // Returns `false` if the given `absl::string_view` is empty *or* contains only + // whitespace, indicating that `StrSplit()` should omit the string. + // + // Example: + // + // std::vector v = absl::StrSplit(" a , ,,b,", + // ',', SkipWhitespace()); + // // v[0] == " a ", v[1] == "b" + // + // // SkipEmpty() would return whitespace elements + // std::vector v = absl::StrSplit(" a , ,,b,", ',', SkipEmpty()); + // // v[0] == " a ", v[1] == " ", v[2] == "b" + struct SkipWhitespace + { + bool operator()(absl::string_view sp) const + { + sp = absl::StripAsciiWhitespace(sp); + return !sp.empty(); + } + }; + + template + using EnableSplitIfString = + typename std::enable_if::value || std::is_same::value, int>::type; + + //------------------------------------------------------------------------------ + // StrSplit() + //------------------------------------------------------------------------------ + + // StrSplit() + // + // Splits a given string based on the provided `Delimiter` object, returning the + // elements within the type specified by the caller. 
Optionally, you may pass a + // `Predicate` to `StrSplit()` indicating whether to include or exclude the + // resulting element within the final result set. (See the overviews for + // Delimiters and Predicates above.) + // + // Example: + // + // std::vector v = absl::StrSplit("a,b,c,d", ','); + // // v[0] == "a", v[1] == "b", v[2] == "c", v[3] == "d" + // + // You can also provide an explicit `Delimiter` object: + // + // Example: + // + // using absl::ByAnyChar; + // std::vector v = absl::StrSplit("a,b=c", ByAnyChar(",=")); + // // v[0] == "a", v[1] == "b", v[2] == "c" + // + // See above for more information on delimiters. + // + // By default, empty strings are included in the result set. You can optionally + // include a third `Predicate` argument to apply a test for whether the + // resultant element should be included in the result set: + // + // Example: + // + // std::vector v = absl::StrSplit(" a , ,,b,", + // ',', SkipWhitespace()); + // // v[0] == " a ", v[1] == "b" + // + // See above for more information on predicates. + // + //------------------------------------------------------------------------------ + // StrSplit() Return Types + //------------------------------------------------------------------------------ + // + // The `StrSplit()` function adapts the returned collection to the collection + // specified by the caller (e.g. `std::vector` above). The returned collections + // may contain `std::string`, `absl::string_view` (in which case the original + // string being split must ensure that it outlives the collection), or any + // object that can be explicitly created from an `absl::string_view`. This + // behavior works for: + // + // 1) All standard STL containers including `std::vector`, `std::list`, + // `std::deque`, `std::set`,`std::multiset`, 'std::map`, and `std::multimap` + // 2) `std::pair` (which is not actually a container). See below. + // + // Example: + // + // // The results are returned as `absl::string_view` objects. 
Note that we + // // have to ensure that the input string outlives any results. + // std::vector v = absl::StrSplit("a,b,c", ','); + // + // // Stores results in a std::set, which also performs + // // de-duplication and orders the elements in ascending order. + // std::set a = absl::StrSplit("b,a,c,a,b", ','); + // // v[0] == "a", v[1] == "b", v[2] = "c" + // + // // `StrSplit()` can be used within a range-based for loop, in which case + // // each element will be of type `absl::string_view`. + // std::vector v; + // for (const auto sv : absl::StrSplit("a,b,c", ',')) { + // if (sv != "b") v.emplace_back(sv); + // } + // // v[0] == "a", v[1] == "c" + // + // // Stores results in a map. The map implementation assumes that the input + // // is provided as a series of key/value pairs. For example, the 0th element + // // resulting from the split will be stored as a key to the 1st element. If + // // an odd number of elements are resolved, the last element is paired with + // // a default-constructed value (e.g., empty string). + // std::map m = absl::StrSplit("a,b,c", ','); + // // m["a"] == "b", m["c"] == "" // last component value equals "" + // + // Splitting to `std::pair` is an interesting case because it can hold only two + // elements and is not a collection type. When splitting to a `std::pair` the + // first two split strings become the `std::pair` `.first` and `.second` + // members, respectively. The remaining split substrings are discarded. If there + // are less than two split substrings, the empty string is used for the + // corresponding `std::pair` member. + // + // Example: + // + // // Stores first two split strings as the members in a std::pair. + // std::pair p = absl::StrSplit("a,b,c", ','); + // // p.first == "a", p.second == "b" // "c" is omitted. + // + // The `StrSplit()` function can be used multiple times to perform more + // complicated splitting logic, such as intelligently parsing key-value pairs. 
+ // + // Example: + // + // // The input string "a=b=c,d=e,f=,g" becomes + // // { "a" => "b=c", "d" => "e", "f" => "", "g" => "" } + // std::map m; + // for (absl::string_view sp : absl::StrSplit("a=b=c,d=e,f=,g", ',')) { + // m.insert(absl::StrSplit(sp, absl::MaxSplits('=', 1))); + // } + // EXPECT_EQ("b=c", m.find("a")->second); + // EXPECT_EQ("e", m.find("d")->second); + // EXPECT_EQ("", m.find("f")->second); + // EXPECT_EQ("", m.find("g")->second); + // + // WARNING: Due to a legacy bug that is maintained for backward compatibility, + // splitting the following empty string_views produces different results: + // + // absl::StrSplit(absl::string_view(""), '-'); // {""} + // absl::StrSplit(absl::string_view(), '-'); // {}, but should be {""} + // + // Try not to depend on this distinction because the bug may one day be fixed. + template + strings_internal::Splitter< + typename strings_internal::SelectDelimiter::type, + AllowEmpty, + absl::string_view> + StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d) + { + using DelimiterType = + typename strings_internal::SelectDelimiter::type; + return strings_internal::Splitter( + text.value(), DelimiterType(d), AllowEmpty() + ); + } + + template = 0> + strings_internal::Splitter< + typename strings_internal::SelectDelimiter::type, + AllowEmpty, + std::string> + StrSplit(StringType&& text, Delimiter d) + { + using DelimiterType = + typename strings_internal::SelectDelimiter::type; + return strings_internal::Splitter( + std::move(text), DelimiterType(d), AllowEmpty() + ); + } + + template + strings_internal::Splitter< + typename strings_internal::SelectDelimiter::type, + Predicate, + absl::string_view> + StrSplit(strings_internal::ConvertibleToStringView text, Delimiter d, Predicate p) + { + using DelimiterType = + typename strings_internal::SelectDelimiter::type; + return strings_internal::Splitter( + text.value(), DelimiterType(d), std::move(p) + ); + } + + template = 0> + strings_internal::Splitter< + 
typename strings_internal::SelectDelimiter::type, + Predicate, + std::string> + StrSplit(StringType&& text, Delimiter d, Predicate p) + { + using DelimiterType = + typename strings_internal::SelectDelimiter::type; + return strings_internal::Splitter( + std::move(text), DelimiterType(d), std::move(p) + ); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_STR_SPLIT_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/string_view.h b/CAPI/cpp/grpc/include/absl/strings/string_view.h new file mode 100644 index 00000000..ac8d52b7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/string_view.h @@ -0,0 +1,796 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: string_view.h +// ----------------------------------------------------------------------------- +// +// This file contains the definition of the `absl::string_view` class. A +// `string_view` points to a contiguous span of characters, often part or all of +// another `std::string`, double-quoted string literal, character array, or even +// another `string_view`. +// +// This `absl::string_view` abstraction is designed to be a drop-in +// replacement for the C++17 `std::string_view` abstraction. 
+#ifndef ABSL_STRINGS_STRING_VIEW_H_ +#define ABSL_STRINGS_STRING_VIEW_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +#ifdef ABSL_USES_STD_STRING_VIEW + +#include // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + using string_view = std::string_view; + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_USES_STD_STRING_VIEW + +#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \ + (defined(__GNUC__) && !defined(__clang__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1928) +#define ABSL_INTERNAL_STRING_VIEW_MEMCMP __builtin_memcmp +#else // ABSL_HAVE_BUILTIN(__builtin_memcmp) +#define ABSL_INTERNAL_STRING_VIEW_MEMCMP memcmp +#endif // ABSL_HAVE_BUILTIN(__builtin_memcmp) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::string_view + // + // A `string_view` provides a lightweight view into the string data provided by + // a `std::string`, double-quoted string literal, character array, or even + // another `string_view`. A `string_view` does *not* own the string to which it + // points, and that data cannot be modified through the view. + // + // You can use `string_view` as a function or method parameter anywhere a + // parameter can receive a double-quoted string literal, `const char*`, + // `std::string`, or another `absl::string_view` argument with no need to copy + // the string data. Systematic use of `string_view` within function arguments + // reduces data copies and `strlen()` calls. 
+ // + // Because of its small size, prefer passing `string_view` by value: + // + // void MyFunction(absl::string_view arg); + // + // If circumstances require, you may also pass one by const reference: + // + // void MyFunction(const absl::string_view& arg); // not preferred + // + // Passing by value generates slightly smaller code for many architectures. + // + // In either case, the source data of the `string_view` must outlive the + // `string_view` itself. + // + // A `string_view` is also suitable for local variables if you know that the + // lifetime of the underlying object is longer than the lifetime of your + // `string_view` variable. However, beware of binding a `string_view` to a + // temporary value: + // + // // BAD use of string_view: lifetime problem + // absl::string_view sv = obj.ReturnAString(); + // + // // GOOD use of string_view: str outlives sv + // std::string str = obj.ReturnAString(); + // absl::string_view sv = str; + // + // Due to lifetime issues, a `string_view` is sometimes a poor choice for a + // return value and usually a poor choice for a data member. If you do use a + // `string_view` this way, it is your responsibility to ensure that the object + // pointed to by the `string_view` outlives the `string_view`. + // + // A `string_view` may represent a whole string or just part of a string. For + // example, when splitting a string, `std::vector` is a + // natural data type for the output. + // + // For another example, a Cord is a non-contiguous, potentially very + // long string-like object. The Cord class has an interface that iteratively + // provides string_view objects that point to the successive pieces of a Cord + // object. + // + // When constructed from a source which is NUL-terminated, the `string_view` + // itself will not include the NUL-terminator unless a specific size (including + // the NUL) is passed to the constructor. 
As a result, common idioms that work + // on NUL-terminated strings do not work on `string_view` objects. If you write + // code that scans a `string_view`, you must check its length rather than test + // for nul, for example. Note, however, that nuls may still be embedded within + // a `string_view` explicitly. + // + // You may create a null `string_view` in two ways: + // + // absl::string_view sv; + // absl::string_view sv(nullptr, 0); + // + // For the above, `sv.data() == nullptr`, `sv.length() == 0`, and + // `sv.empty() == true`. Also, if you create a `string_view` with a non-null + // pointer then `sv.data() != nullptr`. Thus, you can use `string_view()` to + // signal an undefined value that is different from other `string_view` values + // in a similar fashion to how `const char* p1 = nullptr;` is different from + // `const char* p2 = "";`. However, in practice, it is not recommended to rely + // on this behavior. + // + // Be careful not to confuse a null `string_view` with an empty one. A null + // `string_view` is an empty `string_view`, but some empty `string_view`s are + // not null. Prefer checking for emptiness over checking for null. + // + // There are many ways to create an empty string_view: + // + // const char* nullcp = nullptr; + // // string_view.size() will return 0 in all cases. 
+ // absl::string_view(); + // absl::string_view(nullcp, 0); + // absl::string_view(""); + // absl::string_view("", 0); + // absl::string_view("abcdef", 0); + // absl::string_view("abcdef" + 6, 0); + // + // All empty `string_view` objects whether null or not, are equal: + // + // absl::string_view() == absl::string_view("", 0) + // absl::string_view(nullptr, 0) == absl::string_view("abcdef"+6, 0) + class string_view + { + public: + using traits_type = std::char_traits; + using value_type = char; + using pointer = char*; + using const_pointer = const char*; + using reference = char&; + using const_reference = const char&; + using const_iterator = const char*; + using iterator = const_iterator; + using const_reverse_iterator = std::reverse_iterator; + using reverse_iterator = const_reverse_iterator; + using size_type = size_t; + using difference_type = std::ptrdiff_t; + + static constexpr size_type npos = static_cast(-1); + + // Null `string_view` constructor + constexpr string_view() noexcept : + ptr_(nullptr), + length_(0) + { + } + + // Implicit constructors + + template + string_view( // NOLINT(runtime/explicit) + const std::basic_string, Allocator>& str + ABSL_ATTRIBUTE_LIFETIME_BOUND + ) noexcept + // This is implemented in terms of `string_view(p, n)` so `str.size()` + // doesn't need to be reevaluated after `ptr_` is set. + // The length check is also skipped since it is unnecessary and causes + // code bloat. + : + string_view(str.data(), str.size(), SkipCheckLengthTag{}) + { + } + + // Implicit constructor of a `string_view` from NUL-terminated `str`. When + // accepting possibly null strings, use `absl::NullSafeStringView(str)` + // instead (see below). + // The length check is skipped since it is unnecessary and causes code bloat. + constexpr string_view(const char* str) // NOLINT(runtime/explicit) + : + ptr_(str), + length_(str ? StrlenInternal(str) : 0) + { + } + + // Implicit constructor of a `string_view` from a `const char*` and length. 
+ constexpr string_view(const char* data, size_type len) : + ptr_(data), + length_(CheckLengthInternal(len)) + { + } + + // NOTE: Harmlessly omitted to work around gdb bug. + // constexpr string_view(const string_view&) noexcept = default; + // string_view& operator=(const string_view&) noexcept = default; + + // Iterators + + // string_view::begin() + // + // Returns an iterator pointing to the first character at the beginning of the + // `string_view`, or `end()` if the `string_view` is empty. + constexpr const_iterator begin() const noexcept + { + return ptr_; + } + + // string_view::end() + // + // Returns an iterator pointing just beyond the last character at the end of + // the `string_view`. This iterator acts as a placeholder; attempting to + // access it results in undefined behavior. + constexpr const_iterator end() const noexcept + { + return ptr_ + length_; + } + + // string_view::cbegin() + // + // Returns a const iterator pointing to the first character at the beginning + // of the `string_view`, or `end()` if the `string_view` is empty. + constexpr const_iterator cbegin() const noexcept + { + return begin(); + } + + // string_view::cend() + // + // Returns a const iterator pointing just beyond the last character at the end + // of the `string_view`. This pointer acts as a placeholder; attempting to + // access its element results in undefined behavior. + constexpr const_iterator cend() const noexcept + { + return end(); + } + + // string_view::rbegin() + // + // Returns a reverse iterator pointing to the last character at the end of the + // `string_view`, or `rend()` if the `string_view` is empty. + const_reverse_iterator rbegin() const noexcept + { + return const_reverse_iterator(end()); + } + + // string_view::rend() + // + // Returns a reverse iterator pointing just before the first character at the + // beginning of the `string_view`. This pointer acts as a placeholder; + // attempting to access its element results in undefined behavior. 
+ const_reverse_iterator rend() const noexcept + { + return const_reverse_iterator(begin()); + } + + // string_view::crbegin() + // + // Returns a const reverse iterator pointing to the last character at the end + // of the `string_view`, or `crend()` if the `string_view` is empty. + const_reverse_iterator crbegin() const noexcept + { + return rbegin(); + } + + // string_view::crend() + // + // Returns a const reverse iterator pointing just before the first character + // at the beginning of the `string_view`. This pointer acts as a placeholder; + // attempting to access its element results in undefined behavior. + const_reverse_iterator crend() const noexcept + { + return rend(); + } + + // Capacity Utilities + + // string_view::size() + // + // Returns the number of characters in the `string_view`. + constexpr size_type size() const noexcept + { + return length_; + } + + // string_view::length() + // + // Returns the number of characters in the `string_view`. Alias for `size()`. + constexpr size_type length() const noexcept + { + return size(); + } + + // string_view::max_size() + // + // Returns the maximum number of characters the `string_view` can hold. + constexpr size_type max_size() const noexcept + { + return kMaxSize; + } + + // string_view::empty() + // + // Checks if the `string_view` is empty (refers to no characters). + constexpr bool empty() const noexcept + { + return length_ == 0; + } + + // string_view::operator[] + // + // Returns the ith element of the `string_view` using the array operator. + // Note that this operator does not perform any bounds checking. + constexpr const_reference operator[](size_type i) const + { + return ABSL_HARDENING_ASSERT(i < size()), ptr_[i]; + } + + // string_view::at() + // + // Returns the ith element of the `string_view`. Bounds checking is performed, + // and an exception of type `std::out_of_range` will be thrown on invalid + // access. 
+ constexpr const_reference at(size_type i) const + { + return ABSL_PREDICT_TRUE(i < size()) ? ptr_[i] : ((void)base_internal::ThrowStdOutOfRange("absl::string_view::at"), ptr_[i]); + } + + // string_view::front() + // + // Returns the first element of a `string_view`. + constexpr const_reference front() const + { + return ABSL_HARDENING_ASSERT(!empty()), ptr_[0]; + } + + // string_view::back() + // + // Returns the last element of a `string_view`. + constexpr const_reference back() const + { + return ABSL_HARDENING_ASSERT(!empty()), ptr_[size() - 1]; + } + + // string_view::data() + // + // Returns a pointer to the underlying character array (which is of course + // stored elsewhere). Note that `string_view::data()` may contain embedded nul + // characters, but the returned buffer may or may not be NUL-terminated; + // therefore, do not pass `data()` to a routine that expects a NUL-terminated + // string. + constexpr const_pointer data() const noexcept + { + return ptr_; + } + + // Modifiers + + // string_view::remove_prefix() + // + // Removes the first `n` characters from the `string_view`. Note that the + // underlying string is not changed, only the view. + constexpr void remove_prefix(size_type n) + { + ABSL_HARDENING_ASSERT(n <= length_); + ptr_ += n; + length_ -= n; + } + + // string_view::remove_suffix() + // + // Removes the last `n` characters from the `string_view`. Note that the + // underlying string is not changed, only the view. + constexpr void remove_suffix(size_type n) + { + ABSL_HARDENING_ASSERT(n <= length_); + length_ -= n; + } + + // string_view::swap() + // + // Swaps this `string_view` with another `string_view`. + constexpr void swap(string_view& s) noexcept + { + auto t = *this; + *this = s; + s = t; + } + + // Explicit conversion operators + + // Converts to `std::basic_string`. 
+ template + explicit operator std::basic_string() const + { + if (!data()) + return {}; + return std::basic_string(data(), size()); + } + + // string_view::copy() + // + // Copies the contents of the `string_view` at offset `pos` and length `n` + // into `buf`. + size_type copy(char* buf, size_type n, size_type pos = 0) const + { + if (ABSL_PREDICT_FALSE(pos > length_)) + { + base_internal::ThrowStdOutOfRange("absl::string_view::copy"); + } + size_type rlen = (std::min)(length_ - pos, n); + if (rlen > 0) + { + const char* start = ptr_ + pos; + traits_type::copy(buf, start, rlen); + } + return rlen; + } + + // string_view::substr() + // + // Returns a "substring" of the `string_view` (at offset `pos` and length + // `n`) as another string_view. This function throws `std::out_of_bounds` if + // `pos > size`. + // Use absl::ClippedSubstr if you need a truncating substr operation. + constexpr string_view substr(size_type pos = 0, size_type n = npos) const + { + return ABSL_PREDICT_FALSE(pos > length_) ? (base_internal::ThrowStdOutOfRange( + "absl::string_view::substr" + ), + string_view()) : + string_view(ptr_ + pos, Min(n, length_ - pos)); + } + + // string_view::compare() + // + // Performs a lexicographical comparison between this `string_view` and + // another `string_view` `x`, returning a negative value if `*this` is less + // than `x`, 0 if `*this` is equal to `x`, and a positive value if `*this` + // is greater than `x`. + constexpr int compare(string_view x) const noexcept + { + return CompareImpl(length_, x.length_, Min(length_, x.length_) == 0 ? 0 : ABSL_INTERNAL_STRING_VIEW_MEMCMP(ptr_, x.ptr_, Min(length_, x.length_))); + } + + // Overload of `string_view::compare()` for comparing a substring of the + // 'string_view` and another `absl::string_view`. 
+ constexpr int compare(size_type pos1, size_type count1, string_view v) const + { + return substr(pos1, count1).compare(v); + } + + // Overload of `string_view::compare()` for comparing a substring of the + // `string_view` and a substring of another `absl::string_view`. + constexpr int compare(size_type pos1, size_type count1, string_view v, size_type pos2, size_type count2) const + { + return substr(pos1, count1).compare(v.substr(pos2, count2)); + } + + // Overload of `string_view::compare()` for comparing a `string_view` and a + // a different C-style string `s`. + constexpr int compare(const char* s) const + { + return compare(string_view(s)); + } + + // Overload of `string_view::compare()` for comparing a substring of the + // `string_view` and a different string C-style string `s`. + constexpr int compare(size_type pos1, size_type count1, const char* s) const + { + return substr(pos1, count1).compare(string_view(s)); + } + + // Overload of `string_view::compare()` for comparing a substring of the + // `string_view` and a substring of a different C-style string `s`. + constexpr int compare(size_type pos1, size_type count1, const char* s, size_type count2) const + { + return substr(pos1, count1).compare(string_view(s, count2)); + } + + // Find Utilities + + // string_view::find() + // + // Finds the first occurrence of the substring `s` within the `string_view`, + // returning the position of the first character's match, or `npos` if no + // match was found. + size_type find(string_view s, size_type pos = 0) const noexcept; + + // Overload of `string_view::find()` for finding the given character `c` + // within the `string_view`. + size_type find(char c, size_type pos = 0) const noexcept; + + // Overload of `string_view::find()` for finding a substring of a different + // C-style string `s` within the `string_view`. 
+ size_type find(const char* s, size_type pos, size_type count) const + { + return find(string_view(s, count), pos); + } + + // Overload of `string_view::find()` for finding a different C-style string + // `s` within the `string_view`. + size_type find(const char* s, size_type pos = 0) const + { + return find(string_view(s), pos); + } + + // string_view::rfind() + // + // Finds the last occurrence of a substring `s` within the `string_view`, + // returning the position of the first character's match, or `npos` if no + // match was found. + size_type rfind(string_view s, size_type pos = npos) const noexcept; + + // Overload of `string_view::rfind()` for finding the last given character `c` + // within the `string_view`. + size_type rfind(char c, size_type pos = npos) const noexcept; + + // Overload of `string_view::rfind()` for finding a substring of a different + // C-style string `s` within the `string_view`. + size_type rfind(const char* s, size_type pos, size_type count) const + { + return rfind(string_view(s, count), pos); + } + + // Overload of `string_view::rfind()` for finding a different C-style string + // `s` within the `string_view`. + size_type rfind(const char* s, size_type pos = npos) const + { + return rfind(string_view(s), pos); + } + + // string_view::find_first_of() + // + // Finds the first occurrence of any of the characters in `s` within the + // `string_view`, returning the start position of the match, or `npos` if no + // match was found. + size_type find_first_of(string_view s, size_type pos = 0) const noexcept; + + // Overload of `string_view::find_first_of()` for finding a character `c` + // within the `string_view`. + size_type find_first_of(char c, size_type pos = 0) const noexcept + { + return find(c, pos); + } + + // Overload of `string_view::find_first_of()` for finding a substring of a + // different C-style string `s` within the `string_view`. 
+ size_type find_first_of(const char* s, size_type pos, size_type count) const + { + return find_first_of(string_view(s, count), pos); + } + + // Overload of `string_view::find_first_of()` for finding a different C-style + // string `s` within the `string_view`. + size_type find_first_of(const char* s, size_type pos = 0) const + { + return find_first_of(string_view(s), pos); + } + + // string_view::find_last_of() + // + // Finds the last occurrence of any of the characters in `s` within the + // `string_view`, returning the start position of the match, or `npos` if no + // match was found. + size_type find_last_of(string_view s, size_type pos = npos) const noexcept; + + // Overload of `string_view::find_last_of()` for finding a character `c` + // within the `string_view`. + size_type find_last_of(char c, size_type pos = npos) const noexcept + { + return rfind(c, pos); + } + + // Overload of `string_view::find_last_of()` for finding a substring of a + // different C-style string `s` within the `string_view`. + size_type find_last_of(const char* s, size_type pos, size_type count) const + { + return find_last_of(string_view(s, count), pos); + } + + // Overload of `string_view::find_last_of()` for finding a different C-style + // string `s` within the `string_view`. + size_type find_last_of(const char* s, size_type pos = npos) const + { + return find_last_of(string_view(s), pos); + } + + // string_view::find_first_not_of() + // + // Finds the first occurrence of any of the characters not in `s` within the + // `string_view`, returning the start position of the first non-match, or + // `npos` if no non-match was found. + size_type find_first_not_of(string_view s, size_type pos = 0) const noexcept; + + // Overload of `string_view::find_first_not_of()` for finding a character + // that is not `c` within the `string_view`. 
+ size_type find_first_not_of(char c, size_type pos = 0) const noexcept; + + // Overload of `string_view::find_first_not_of()` for finding a substring of a + // different C-style string `s` within the `string_view`. + size_type find_first_not_of(const char* s, size_type pos, size_type count) const + { + return find_first_not_of(string_view(s, count), pos); + } + + // Overload of `string_view::find_first_not_of()` for finding a different + // C-style string `s` within the `string_view`. + size_type find_first_not_of(const char* s, size_type pos = 0) const + { + return find_first_not_of(string_view(s), pos); + } + + // string_view::find_last_not_of() + // + // Finds the last occurrence of any of the characters not in `s` within the + // `string_view`, returning the start position of the last non-match, or + // `npos` if no non-match was found. + size_type find_last_not_of(string_view s, size_type pos = npos) const noexcept; + + // Overload of `string_view::find_last_not_of()` for finding a character + // that is not `c` within the `string_view`. + size_type find_last_not_of(char c, size_type pos = npos) const noexcept; + + // Overload of `string_view::find_last_not_of()` for finding a substring of a + // different C-style string `s` within the `string_view`. + size_type find_last_not_of(const char* s, size_type pos, size_type count) const + { + return find_last_not_of(string_view(s, count), pos); + } + + // Overload of `string_view::find_last_not_of()` for finding a different + // C-style string `s` within the `string_view`. + size_type find_last_not_of(const char* s, size_type pos = npos) const + { + return find_last_not_of(string_view(s), pos); + } + + private: + // The constructor from std::string delegates to this constructor. + // See the comment on that constructor for the rationale. 
+ struct SkipCheckLengthTag + { + }; + string_view(const char* data, size_type len, SkipCheckLengthTag) noexcept + : + ptr_(data), + length_(len) + { + } + + static constexpr size_type kMaxSize = + (std::numeric_limits::max)(); + + static constexpr size_type CheckLengthInternal(size_type len) + { + return ABSL_HARDENING_ASSERT(len <= kMaxSize), len; + } + + static constexpr size_type StrlenInternal(const char* str) + { +#if defined(_MSC_VER) && _MSC_VER >= 1910 && !defined(__clang__) + // MSVC 2017+ can evaluate this at compile-time. + const char* begin = str; + while (*str != '\0') + ++str; + return str - begin; +#elif ABSL_HAVE_BUILTIN(__builtin_strlen) || \ + (defined(__GNUC__) && !defined(__clang__)) + // GCC has __builtin_strlen according to + // https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Other-Builtins.html, but + // ABSL_HAVE_BUILTIN doesn't detect that, so we use the extra checks above. + // __builtin_strlen is constexpr. + return __builtin_strlen(str); +#else + return str ? strlen(str) : 0; +#endif + } + + static constexpr size_t Min(size_type length_a, size_type length_b) + { + return length_a < length_b ? length_a : length_b; + } + + static constexpr int CompareImpl(size_type length_a, size_type length_b, int compare_result) + { + return compare_result == 0 ? static_cast(length_a > length_b) - + static_cast(length_a < length_b) : + (compare_result < 0 ? -1 : 1); + } + + const char* ptr_; + size_type length_; + }; + + // This large function is defined inline so that in a fairly common case where + // one of the arguments is a literal, the compiler can elide a lot of the + // following comparisons. 
+ constexpr bool operator==(string_view x, string_view y) noexcept + { + return x.size() == y.size() && + (x.empty() || + ABSL_INTERNAL_STRING_VIEW_MEMCMP(x.data(), y.data(), x.size()) == 0); + } + + constexpr bool operator!=(string_view x, string_view y) noexcept + { + return !(x == y); + } + + constexpr bool operator<(string_view x, string_view y) noexcept + { + return x.compare(y) < 0; + } + + constexpr bool operator>(string_view x, string_view y) noexcept + { + return y < x; + } + + constexpr bool operator<=(string_view x, string_view y) noexcept + { + return !(y < x); + } + + constexpr bool operator>=(string_view x, string_view y) noexcept + { + return !(x < y); + } + + // IO Insertion Operator + std::ostream& operator<<(std::ostream& o, string_view piece); + + ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_STRING_VIEW_MEMCMP + +#endif // ABSL_USES_STD_STRING_VIEW + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ClippedSubstr() + // + // Like `s.substr(pos, n)`, but clips `pos` to an upper bound of `s.size()`. + // Provided because std::string_view::substr throws if `pos > size()` + inline string_view ClippedSubstr(string_view s, size_t pos, size_t n = string_view::npos) + { + pos = (std::min)(pos, static_cast(s.size())); + return s.substr(pos, n); + } + + // NullSafeStringView() + // + // Creates an `absl::string_view` from a pointer `p` even if it's null-valued. + // This function should be used where an `absl::string_view` can be created from + // a possibly-null pointer. + constexpr string_view NullSafeStringView(const char* p) + { + return p ? string_view(p) : string_view(); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_STRING_VIEW_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/strip.h b/CAPI/cpp/grpc/include/absl/strings/strip.h new file mode 100644 index 00000000..a643aefb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/strip.h @@ -0,0 +1,104 @@ +// +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: strip.h +// ----------------------------------------------------------------------------- +// +// This file contains various functions for stripping substrings from a string. +#ifndef ABSL_STRINGS_STRIP_H_ +#define ABSL_STRINGS_STRIP_H_ + +#include +#include + +#include "absl/base/macros.h" +#include "absl/strings/ascii.h" +#include "absl/strings/match.h" +#include "absl/strings/string_view.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ConsumePrefix() + // + // Strips the `expected` prefix, if found, from the start of `str`. + // If the operation succeeded, `true` is returned. If not, `false` + // is returned and `str` is not modified. + // + // Example: + // + // absl::string_view input("abc"); + // EXPECT_TRUE(absl::ConsumePrefix(&input, "a")); + // EXPECT_EQ(input, "bc"); + inline bool ConsumePrefix(absl::string_view* str, absl::string_view expected) + { + if (!absl::StartsWith(*str, expected)) + return false; + str->remove_prefix(expected.size()); + return true; + } + // ConsumeSuffix() + // + // Strips the `expected` suffix, if found, from the end of `str`. + // If the operation succeeded, `true` is returned. If not, `false` + // is returned and `str` is not modified. 
+ // + // Example: + // + // absl::string_view input("abcdef"); + // EXPECT_TRUE(absl::ConsumeSuffix(&input, "def")); + // EXPECT_EQ(input, "abc"); + inline bool ConsumeSuffix(absl::string_view* str, absl::string_view expected) + { + if (!absl::EndsWith(*str, expected)) + return false; + str->remove_suffix(expected.size()); + return true; + } + + // StripPrefix() + // + // Returns a view into the input string `str` with the given `prefix` removed, + // but leaving the original string intact. If the prefix does not match at the + // start of the string, returns the original string instead. + ABSL_MUST_USE_RESULT inline absl::string_view StripPrefix( + absl::string_view str, absl::string_view prefix + ) + { + if (absl::StartsWith(str, prefix)) + str.remove_prefix(prefix.size()); + return str; + } + + // StripSuffix() + // + // Returns a view into the input string `str` with the given `suffix` removed, + // but leaving the original string intact. If the suffix does not match at the + // end of the string, returns the original string instead. + ABSL_MUST_USE_RESULT inline absl::string_view StripSuffix( + absl::string_view str, absl::string_view suffix + ) + { + if (absl::EndsWith(str, suffix)) + str.remove_suffix(suffix.size()); + return str; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_STRINGS_STRIP_H_ diff --git a/CAPI/cpp/grpc/include/absl/strings/substitute.h b/CAPI/cpp/grpc/include/absl/strings/substitute.h new file mode 100644 index 00000000..30ba8c13 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/strings/substitute.h @@ -0,0 +1,676 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: substitute.h +// ----------------------------------------------------------------------------- +// +// This package contains functions for efficiently performing string +// substitutions using a format string with positional notation: +// `Substitute()` and `SubstituteAndAppend()`. +// +// Unlike printf-style format specifiers, `Substitute()` functions do not need +// to specify the type of the substitution arguments. Supported arguments +// following the format string, such as strings, string_views, ints, +// floats, and bools, are automatically converted to strings during the +// substitution process. (See below for a full list of supported types.) +// +// `Substitute()` does not allow you to specify *how* to format a value, beyond +// the default conversion to string. For example, you cannot format an integer +// in hex. +// +// The format string uses positional identifiers indicated by a dollar sign ($) +// and single digit positional ids to indicate which substitution arguments to +// use at that location within the format string. +// +// A '$$' sequence in the format string causes a literal '$' character to be +// output. +// +// Example 1: +// std::string s = Substitute("$1 purchased $0 $2 for $$10. Thanks $1!", +// 5, "Bob", "Apples"); +// EXPECT_EQ("Bob purchased 5 Apples for $10. Thanks Bob!", s); +// +// Example 2: +// std::string s = "Hi. "; +// SubstituteAndAppend(&s, "My name is $0 and I am $1 years old.", "Bob", 5); +// EXPECT_EQ("Hi. 
My name is Bob and I am 5 years old.", s); +// +// Supported types: +// * absl::string_view, std::string, const char* (null is equivalent to "") +// * int32_t, int64_t, uint32_t, uint64_t +// * float, double +// * bool (Printed as "true" or "false") +// * pointer types other than char* (Printed as "0x", +// except that null is printed as "NULL") +// * user-defined types via the `AbslStringify()` customization point. See the +// documentation for `absl::StrCat` for an explanation on how to use this. +// +// If an invalid format string is provided, Substitute returns an empty string +// and SubstituteAndAppend does not change the provided output string. +// A format string is invalid if it: +// * ends in an unescaped $ character, +// e.g. "Hello $", or +// * calls for a position argument which is not provided, +// e.g. Substitute("Hello $2", "world"), or +// * specifies a non-digit, non-$ character after an unescaped $ character, +// e.g. "Hello $f". +// In debug mode, i.e. #ifndef NDEBUG, such errors terminate the program. + +#ifndef ABSL_STRINGS_SUBSTITUTE_H_ +#define ABSL_STRINGS_SUBSTITUTE_H_ + +#include +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/strings/ascii.h" +#include "absl/strings/escaping.h" +#include "absl/strings/internal/stringify_sink.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" +#include "absl/strings/strip.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace substitute_internal + { + + // Arg + // + // This class provides an argument type for `absl::Substitute()` and + // `absl::SubstituteAndAppend()`. `Arg` handles implicit conversion of various + // types to a string. (`Arg` is very similar to the `AlphaNum` class in + // `StrCat()`.) + // + // This class has implicit constructors. 
+ class Arg + { + public: + // Overloads for string-y things + // + // Explicitly overload `const char*` so the compiler doesn't cast to `bool`. + Arg(const char* value) // NOLINT(google-explicit-constructor) + : + piece_(absl::NullSafeStringView(value)) + { + } + template + Arg( // NOLINT + const std::basic_string, Allocator>& + value + ) noexcept + : + piece_(value) + { + } + Arg(absl::string_view value) // NOLINT(google-explicit-constructor) + : + piece_(value) + { + } + + // Overloads for primitives + // + // No overloads are available for signed and unsigned char because if people + // are explicitly declaring their chars as signed or unsigned then they are + // probably using them as 8-bit integers and would probably prefer an integer + // representation. However, we can't really know, so we make the caller decide + // what to do. + Arg(char value) // NOLINT(google-explicit-constructor) + : + piece_(scratch_, 1) + { + scratch_[0] = value; + } + Arg(short value) // NOLINT(*) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - scratch_)) + { + } + Arg(unsigned short value) // NOLINT(*) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - scratch_)) + { + } + Arg(int value) // NOLINT(google-explicit-constructor) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - scratch_)) + { + } + Arg(unsigned int value) // NOLINT(google-explicit-constructor) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - scratch_)) + { + } + Arg(long value) // NOLINT(*) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - scratch_)) + { + } + Arg(unsigned long value) // NOLINT(*) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - scratch_)) + { + } + Arg(long long value) // NOLINT(*) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - 
scratch_)) + { + } + Arg(unsigned long long value) // NOLINT(*) + : + piece_(scratch_, static_cast(numbers_internal::FastIntToBuffer(value, scratch_) - scratch_)) + { + } + Arg(float value) // NOLINT(google-explicit-constructor) + : + piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) + { + } + Arg(double value) // NOLINT(google-explicit-constructor) + : + piece_(scratch_, numbers_internal::SixDigitsToBuffer(value, scratch_)) + { + } + Arg(bool value) // NOLINT(google-explicit-constructor) + : + piece_(value ? "true" : "false") + { + } + + template::value>::type> + Arg( // NOLINT(google-explicit-constructor) + const T& v, + strings_internal::StringifySink&& sink = {} + ) : + piece_(strings_internal::ExtractStringification(sink, v)) + { + } + + Arg(Hex hex); // NOLINT(google-explicit-constructor) + Arg(Dec dec); // NOLINT(google-explicit-constructor) + + // vector::reference and const_reference require special help to convert + // to `Arg` because it requires two user defined conversions. + template::value && (std::is_same::reference>::value || std::is_same::const_reference>::value)>* = nullptr> + Arg(T value) // NOLINT(google-explicit-constructor) + : + Arg(static_cast(value)) + { + } + + // `void*` values, with the exception of `char*`, are printed as + // "0x". However, in the case of `nullptr`, "NULL" is printed. + Arg(const void* value); // NOLINT(google-explicit-constructor) + + // Normal enums are already handled by the integer formatters. + // This overload matches only scoped enums. + template{} && !std::is_convertible{} && !strings_internal::HasAbslStringify::value>::type> + Arg(T value) // NOLINT(google-explicit-constructor) + : + Arg(static_cast::type>(value)) + { + } + + Arg(const Arg&) = delete; + Arg& operator=(const Arg&) = delete; + + absl::string_view piece() const + { + return piece_; + } + + private: + absl::string_view piece_; + char scratch_[numbers_internal::kFastToBufferSize]; + }; + + // Internal helper function. 
Don't call this from outside this implementation. + // This interface may change without notice. + void SubstituteAndAppendArray(std::string* output, absl::string_view format, const absl::string_view* args_array, size_t num_args); + +#if defined(ABSL_BAD_CALL_IF) + constexpr int CalculateOneBit(const char* format) + { + // Returns: + // * 2^N for '$N' when N is in [0-9] + // * 0 for correct '$' escaping: '$$'. + // * -1 otherwise. + return (*format < '0' || *format > '9') ? (*format == '$' ? 0 : -1) : (1 << (*format - '0')); + } + + constexpr const char* SkipNumber(const char* format) + { + return !*format ? format : (format + 1); + } + + constexpr int PlaceholderBitmask(const char* format) + { + return !*format ? 0 : *format != '$' ? PlaceholderBitmask(format + 1) : + (CalculateOneBit(format + 1) | PlaceholderBitmask(SkipNumber(format + 1))); + } +#endif // ABSL_BAD_CALL_IF + + } // namespace substitute_internal + + // + // PUBLIC API + // + + // SubstituteAndAppend() + // + // Substitutes variables into a given format string and appends to a given + // output string. See file comments above for usage. + // + // The declarations of `SubstituteAndAppend()` below consist of overloads + // for passing 0 to 10 arguments, respectively. + // + // NOTE: A zero-argument `SubstituteAndAppend()` may be used within variadic + // templates to allow a variable number of arguments. + // + // Example: + // template + // void VarMsg(std::string* boilerplate, absl::string_view format, + // const Args&... 
args) { + // absl::SubstituteAndAppend(boilerplate, format, args...); + // } + // + inline void SubstituteAndAppend(std::string* output, absl::string_view format) + { + substitute_internal::SubstituteAndAppendArray(output, format, nullptr, 0); + } + + inline void SubstituteAndAppend(std::string* output, absl::string_view format, const substitute_internal::Arg& a0) + { + const absl::string_view args[] = {a0.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend(std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1) + { + const absl::string_view args[] = {a0.piece(), a1.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend(std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2) + { + const absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend(std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3) + { + const absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(), a3.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend(std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4) + { + const absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(), a3.piece(), a4.piece()}; + 
substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend(std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5) + { + const absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(), a3.piece(), a4.piece(), a5.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend(std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6) + { + const absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(), a3.piece(), a4.piece(), a5.piece(), a6.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend( + std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7 + ) + { + const absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(), a3.piece(), a4.piece(), a5.piece(), a6.piece(), a7.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend( + std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& 
a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8 + ) + { + const absl::string_view args[] = {a0.piece(), a1.piece(), a2.piece(), a3.piece(), a4.piece(), a5.piece(), a6.piece(), a7.piece(), a8.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + + inline void SubstituteAndAppend( + std::string* output, absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8, const substitute_internal::Arg& a9 + ) + { + const absl::string_view args[] = { + a0.piece(), a1.piece(), a2.piece(), a3.piece(), a4.piece(), a5.piece(), a6.piece(), a7.piece(), a8.piece(), a9.piece()}; + substitute_internal::SubstituteAndAppendArray(output, format, args, ABSL_ARRAYSIZE(args)); + } + +#if defined(ABSL_BAD_CALL_IF) + // This body of functions catches cases where the number of placeholders + // doesn't match the number of data arguments. 
+ void SubstituteAndAppend(std::string* output, const char* format) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 0, + "There were no substitution arguments " + "but this format string either has a $[0-9] in it or contains " + "an unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend(std::string* output, const char* format, const substitute_internal::Arg& a0) + ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 1, "There was 1 substitution argument given, but " + "this format string is missing its $0, contains " + "one of $1-$9, or contains an unescaped $ character (use " + "$$ instead)"); + + void SubstituteAndAppend(std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 3, + "There were 2 substitution arguments given, but this format string is " + "missing its $0/$1, contains one of $2-$9, or contains an " + "unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend(std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 7, + "There were 3 substitution arguments given, but " + "this format string is missing its $0/$1/$2, contains one of " + "$3-$9, or contains an unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend(std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 15, + "There were 4 substitution arguments given, but " + "this format string is missing its $0-$3, contains one of " + "$4-$9, or contains an unescaped $ character (use $$ instead)" + ); + + void 
SubstituteAndAppend(std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 31, + "There were 5 substitution arguments given, but " + "this format string is missing its $0-$4, contains one of " + "$5-$9, or contains an unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend(std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 63, + "There were 6 substitution arguments given, but " + "this format string is missing its $0-$5, contains one of " + "$6-$9, or contains an unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend( + std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6 + ) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 127, + "There were 7 substitution arguments given, but " + "this format string is missing its $0-$6, contains one of " + "$7-$9, or contains an unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend( + std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7 
+ ) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 255, + "There were 8 substitution arguments given, but " + "this format string is missing its $0-$7, contains one of " + "$8-$9, or contains an unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend( + std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8 + ) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 511, + "There were 9 substitution arguments given, but " + "this format string is missing its $0-$8, contains a $9, or " + "contains an unescaped $ character (use $$ instead)" + ); + + void SubstituteAndAppend( + std::string* output, const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8, const substitute_internal::Arg& a9 + ) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 1023, + "There were 10 substitution arguments given, but this " + "format string either doesn't contain all of $0 through $9 or " + "contains an unescaped $ character (use $$ instead)" + ); +#endif // ABSL_BAD_CALL_IF + + // Substitute() + // + // Substitutes variables into a given format string. See file comments above + // for usage. + // + // The declarations of `Substitute()` below consist of overloads for passing 0 + // to 10 arguments, respectively. 
+ // + // NOTE: A zero-argument `Substitute()` may be used within variadic templates to + // allow a variable number of arguments. + // + // Example: + // template + // void VarMsg(absl::string_view format, const Args&... args) { + // std::string s = absl::Substitute(format, args...); + + ABSL_MUST_USE_RESULT inline std::string Substitute(absl::string_view format) + { + std::string result; + SubstituteAndAppend(&result, format); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2, a3); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view 
format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6, a7); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6, a7, a8); + return result; + } + + ABSL_MUST_USE_RESULT inline std::string Substitute( + absl::string_view format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const 
substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8, const substitute_internal::Arg& a9 + ) + { + std::string result; + SubstituteAndAppend(&result, format, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9); + return result; + } + +#if defined(ABSL_BAD_CALL_IF) + // This body of functions catches cases where the number of placeholders + // doesn't match the number of data arguments. + std::string Substitute(const char* format) + ABSL_BAD_CALL_IF(substitute_internal::PlaceholderBitmask(format) != 0, "There were no substitution arguments " + "but this format string either has a $[0-9] in it or " + "contains an unescaped $ character (use $$ instead)"); + + std::string Substitute(const char* format, const substitute_internal::Arg& a0) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 1, + "There was 1 substitution argument given, but " + "this format string is missing its $0, contains one of $1-$9, " + "or contains an unescaped $ character (use $$ instead)" + ); + + std::string Substitute(const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 3, + "There were 2 substitution arguments given, but " + "this format string is missing its $0/$1, contains one of " + "$2-$9, or contains an unescaped $ character (use $$ instead)" + ); + + std::string Substitute(const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 7, + "There were 3 substitution arguments given, but " + "this format string is missing its $0/$1/$2, contains one of " + "$3-$9, or contains an unescaped $ character (use $$ instead)" + ); + + std::string 
Substitute(const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 15, + "There were 4 substitution arguments given, but " + "this format string is missing its $0-$3, contains one of " + "$4-$9, or contains an unescaped $ character (use $$ instead)" + ); + + std::string Substitute(const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 31, + "There were 5 substitution arguments given, but " + "this format string is missing its $0-$4, contains one of " + "$5-$9, or contains an unescaped $ character (use $$ instead)" + ); + + std::string Substitute(const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 63, + "There were 6 substitution arguments given, but " + "this format string is missing its $0-$5, contains one of " + "$6-$9, or contains an unescaped $ character (use $$ instead)" + ); + + std::string Substitute(const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 127, + "There were 7 substitution arguments given, but " + "this format string is missing its $0-$6, contains one of " + "$7-$9, or contains an unescaped $ character (use $$ 
instead)" + ); + + std::string Substitute(const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 255, + "There were 8 substitution arguments given, but " + "this format string is missing its $0-$7, contains one of " + "$8-$9, or contains an unescaped $ character (use $$ instead)" + ); + + std::string Substitute( + const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8 + ) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 511, + "There were 9 substitution arguments given, but " + "this format string is missing its $0-$8, contains a $9, or " + "contains an unescaped $ character (use $$ instead)" + ); + + std::string Substitute( + const char* format, const substitute_internal::Arg& a0, const substitute_internal::Arg& a1, const substitute_internal::Arg& a2, const substitute_internal::Arg& a3, const substitute_internal::Arg& a4, const substitute_internal::Arg& a5, const substitute_internal::Arg& a6, const substitute_internal::Arg& a7, const substitute_internal::Arg& a8, const substitute_internal::Arg& a9 + ) + ABSL_BAD_CALL_IF( + substitute_internal::PlaceholderBitmask(format) != 1023, + "There were 10 substitution arguments given, but this " + "format string either doesn't contain all of $0 through $9 or " + "contains an unescaped $ character (use $$ instead)" + ); +#endif // ABSL_BAD_CALL_IF + + ABSL_NAMESPACE_END +} // namespace absl + 
+#endif // ABSL_STRINGS_SUBSTITUTE_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/barrier.h b/CAPI/cpp/grpc/include/absl/synchronization/barrier.h new file mode 100644 index 00000000..b04d6e50 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/barrier.h @@ -0,0 +1,84 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// barrier.h +// ----------------------------------------------------------------------------- + +#ifndef ABSL_SYNCHRONIZATION_BARRIER_H_ +#define ABSL_SYNCHRONIZATION_BARRIER_H_ + +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Barrier + // + // This class creates a barrier which blocks threads until a prespecified + // threshold of threads (`num_threads`) utilizes the barrier. A thread utilizes + // the `Barrier` by calling `Block()` on the barrier, which will block that + // thread; no call to `Block()` will return until `num_threads` threads have + // called it. + // + // Exactly one call to `Block()` will return `true`, which is then responsible + // for destroying the barrier; because stack allocation will cause the barrier + // to be deleted when it is out of scope, barriers should not be stack + // allocated. 
+ // + // Example: + // + // // Main thread creates a `Barrier`: + // barrier = new Barrier(num_threads); + // + // // Each participating thread could then call: + // if (barrier->Block()) delete barrier; // Exactly one call to `Block()` + // // returns `true`; that call + // // deletes the barrier. + class Barrier + { + public: + // `num_threads` is the number of threads that will participate in the barrier + explicit Barrier(int num_threads) : + num_to_block_(num_threads), + num_to_exit_(num_threads) + { + } + + Barrier(const Barrier&) = delete; + Barrier& operator=(const Barrier&) = delete; + + // Barrier::Block() + // + // Blocks the current thread, and returns only when the `num_threads` + // threshold of threads utilizing this barrier has been reached. `Block()` + // returns `true` for precisely one caller, which may then destroy the + // barrier. + // + // Memory ordering: For any threads X and Y, any action taken by X + // before X calls `Block()` will be visible to Y after Y returns from + // `Block()`. + bool Block(); + + private: + Mutex lock_; + int num_to_block_ ABSL_GUARDED_BY(lock_); + int num_to_exit_ ABSL_GUARDED_BY(lock_); + }; + + ABSL_NAMESPACE_END +} // namespace absl +#endif // ABSL_SYNCHRONIZATION_BARRIER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/blocking_counter.h b/CAPI/cpp/grpc/include/absl/synchronization/blocking_counter.h new file mode 100644 index 00000000..cd50359f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/blocking_counter.h @@ -0,0 +1,103 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// blocking_counter.h +// ----------------------------------------------------------------------------- + +#ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_ +#define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_ + +#include + +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // BlockingCounter + // + // This class allows a thread to block for a pre-specified number of actions. + // `BlockingCounter` maintains a single non-negative abstract integer "count" + // with an initial value `initial_count`. A thread can then call `Wait()` on + // this blocking counter to block until the specified number of events occur; + // worker threads then call 'DecrementCount()` on the counter upon completion of + // their work. Once the counter's internal "count" reaches zero, the blocked + // thread unblocks. + // + // A `BlockingCounter` requires the following: + // - its `initial_count` is non-negative. + // - the number of calls to `DecrementCount()` on it is at most + // `initial_count`. + // - `Wait()` is called at most once on it. + // + // Given the above requirements, a `BlockingCounter` provides the following + // guarantees: + // - Once its internal "count" reaches zero, no legal action on the object + // can further change the value of "count". + // - When `Wait()` returns, it is legal to destroy the `BlockingCounter`. 
+ // - When `Wait()` returns, the number of calls to `DecrementCount()` on + // this blocking counter exactly equals `initial_count`. + // + // Example: + // BlockingCounter bcount(N); // there are N items of work + // ... Allow worker threads to start. + // ... On completing each work item, workers do: + // ... bcount.DecrementCount(); // an item of work has been completed + // + // bcount.Wait(); // wait for all work to be complete + // + class BlockingCounter + { + public: + explicit BlockingCounter(int initial_count); + + BlockingCounter(const BlockingCounter&) = delete; + BlockingCounter& operator=(const BlockingCounter&) = delete; + + // BlockingCounter::DecrementCount() + // + // Decrements the counter's "count" by one, and return "count == 0". This + // function requires that "count != 0" when it is called. + // + // Memory ordering: For any threads X and Y, any action taken by X + // before it calls `DecrementCount()` is visible to thread Y after + // Y's call to `DecrementCount()`, provided Y's call returns `true`. + bool DecrementCount(); + + // BlockingCounter::Wait() + // + // Blocks until the counter reaches zero. This function may be called at most + // once. On return, `DecrementCount()` will have been called "initial_count" + // times and the blocking counter may be destroyed. + // + // Memory ordering: For any threads X and Y, any action taken by X + // before X calls `DecrementCount()` is visible to Y after Y returns + // from `Wait()`. 
+ void Wait(); + + private: + Mutex lock_; + std::atomic count_; + int num_waiting_ ABSL_GUARDED_BY(lock_); + bool done_ ABSL_GUARDED_BY(lock_); + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/create_thread_identity.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/create_thread_identity.h new file mode 100644 index 00000000..369c3ab8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/create_thread_identity.h @@ -0,0 +1,60 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Interface for getting the current ThreadIdentity, creating one if necessary. +// See thread_identity.h. +// +// This file is separate from thread_identity.h because creating a new +// ThreadIdentity requires slightly higher level libraries (per_thread_sem +// and low_level_alloc) than accessing an existing one. This separation allows +// us to have a smaller //absl/base:base. + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_ + +#include "absl/base/internal/thread_identity.h" +#include "absl/base/port.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + + // Allocates and attaches a ThreadIdentity object for the calling thread. + // For private use only. 
+ base_internal::ThreadIdentity* CreateThreadIdentity(); + + // Returns the ThreadIdentity object representing the calling thread; guaranteed + // to be unique for its lifetime. The returned object will remain valid for the + // program's lifetime; although it may be re-assigned to a subsequent thread. + // If one does not exist for the calling thread, allocate it now. + inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() + { + base_internal::ThreadIdentity* identity = + base_internal::CurrentThreadIdentityIfPresent(); + if (ABSL_PREDICT_FALSE(identity == nullptr)) + { + return CreateThreadIdentity(); + } + return identity; + } + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/futex.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/futex.h new file mode 100644 index 00000000..ab08990f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/futex.h @@ -0,0 +1,189 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_ + +#include "absl/base/config.h" + +#ifndef _WIN32 +#include +#include +#endif + +#ifdef __linux__ +#include +#include +#endif + +#include +#include +#include + +#include +#include +#include + +#include "absl/base/optimization.h" +#include "absl/synchronization/internal/kernel_timeout.h" + +#ifdef ABSL_INTERNAL_HAVE_FUTEX +#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line +#elif defined(__BIONIC__) +// Bionic supports all the futex operations we need even when some of the futex +// definitions are missing. +#define ABSL_INTERNAL_HAVE_FUTEX +#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME) +// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28. +#define ABSL_INTERNAL_HAVE_FUTEX +#endif + +#ifdef ABSL_INTERNAL_HAVE_FUTEX + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + +// Some Android headers are missing these definitions even though they +// support these futex operations. +#ifdef __BIONIC__ +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +#ifndef FUTEX_WAIT_BITSET +#define FUTEX_WAIT_BITSET 9 +#endif +#ifndef FUTEX_PRIVATE_FLAG +#define FUTEX_PRIVATE_FLAG 128 +#endif +#ifndef FUTEX_CLOCK_REALTIME +#define FUTEX_CLOCK_REALTIME 256 +#endif +#ifndef FUTEX_BITSET_MATCH_ANY +#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF +#endif +#endif + +#if defined(__NR_futex_time64) && !defined(SYS_futex_time64) +#define SYS_futex_time64 __NR_futex_time64 +#endif + +#if defined(SYS_futex_time64) && !defined(SYS_futex) +#define SYS_futex SYS_futex_time64 + using FutexTimespec = struct timespec; +#else + // Some libc implementations have switched to an unconditional 64-bit `time_t` + // definition. This means that `struct timespec` may not match the layout + // expected by the kernel ABI on 32-bit platforms. So we define the + // FutexTimespec that matches the kernel timespec definition. 
It should be safe + // to use this struct for 64-bit userspace builds too, since it will use another + // SYS_futex kernel call with 64-bit tv_sec inside timespec. + struct FutexTimespec + { + long tv_sec; // NOLINT + long tv_nsec; // NOLINT + }; +#endif + + class FutexImpl + { + public: + // Atomically check that `*v == val`, and if it is, then sleep until the until + // woken by `Wake()`. + static int Wait(std::atomic* v, int32_t val) + { + return WaitAbsoluteTimeout(v, val, nullptr); + } + + // Atomically check that `*v == val`, and if it is, then sleep until + // CLOCK_REALTIME reaches `*abs_timeout`, or until woken by `Wake()`. + static int WaitAbsoluteTimeout(std::atomic* v, int32_t val, const struct timespec* abs_timeout) + { + FutexTimespec ts; + // https://locklessinc.com/articles/futex_cheat_sheet/ + // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time. + auto err = syscall( + SYS_futex, reinterpret_cast(v), FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val, ToFutexTimespec(abs_timeout, &ts), nullptr, FUTEX_BITSET_MATCH_ANY + ); + if (err != 0) + { + return -errno; + } + return 0; + } + + // Atomically check that `*v == val`, and if it is, then sleep until + // `*rel_timeout` has elapsed, or until woken by `Wake()`. + static int WaitRelativeTimeout(std::atomic* v, int32_t val, const struct timespec* rel_timeout) + { + FutexTimespec ts; + // Atomically check that the futex value is still 0, and if it + // is, sleep until abs_timeout or until woken by FUTEX_WAKE. + auto err = + syscall(SYS_futex, reinterpret_cast(v), FUTEX_PRIVATE_FLAG, val, ToFutexTimespec(rel_timeout, &ts)); + if (err != 0) + { + return -errno; + } + return 0; + } + + // Wakes at most `count` waiters that have entered the sleep state on `v`. 
+ static int Wake(std::atomic* v, int32_t count) + { + auto err = syscall(SYS_futex, reinterpret_cast(v), FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count); + if (ABSL_PREDICT_FALSE(err < 0)) + { + return -errno; + } + return 0; + } + + private: + static FutexTimespec* ToFutexTimespec(const struct timespec* userspace_ts, FutexTimespec* futex_ts) + { + if (userspace_ts == nullptr) + { + return nullptr; + } + + using FutexSeconds = decltype(futex_ts->tv_sec); + using FutexNanoseconds = decltype(futex_ts->tv_nsec); + + constexpr auto kMaxSeconds{(std::numeric_limits::max)()}; + if (userspace_ts->tv_sec > kMaxSeconds) + { + futex_ts->tv_sec = kMaxSeconds; + } + else + { + futex_ts->tv_sec = static_cast(userspace_ts->tv_sec); + } + futex_ts->tv_nsec = static_cast(userspace_ts->tv_nsec); + return futex_ts; + } + }; + + class Futex : public FutexImpl + { + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_HAVE_FUTEX + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/futex_waiter.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/futex_waiter.h new file mode 100644 index 00000000..9af3a849 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/futex_waiter.h @@ -0,0 +1,68 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/synchronization/internal/kernel_timeout.h" +#include "absl/synchronization/internal/futex.h" +#include "absl/synchronization/internal/waiter_base.h" + +#ifdef ABSL_INTERNAL_HAVE_FUTEX + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + +#define ABSL_INTERNAL_HAVE_FUTEX_WAITER 1 + + class FutexWaiter : public WaiterCrtp + { + public: + FutexWaiter() : + futex_(0) + { + } + + bool Wait(KernelTimeout t); + void Post(); + void Poke(); + + static constexpr char kName[] = "FutexWaiter"; + + private: + // Atomically check that `*v == val`, and if it is, then sleep until the + // timeout `t` has been reached, or until woken by `Wake()`. + static int WaitUntil(std::atomic* v, int32_t val, KernelTimeout t); + + // Futexes are defined by specification to be 32-bits. + // Thus std::atomic must be just an int32_t with lockfree methods. + std::atomic futex_; + static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex"); + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_HAVE_FUTEX + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/graphcycles.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/graphcycles.h new file mode 100644 index 00000000..58ededf7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/graphcycles.h @@ -0,0 +1,151 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_ + +// GraphCycles detects the introduction of a cycle into a directed +// graph that is being built up incrementally. +// +// Nodes are identified by small integers. It is not possible to +// record multiple edges with the same (source, destination) pair; +// requests to add an edge where one already exists are silently +// ignored. +// +// It is also not possible to introduce a cycle; an attempt to insert +// an edge that would introduce a cycle fails and returns false. +// +// GraphCycles uses no internal locking; calls into it should be +// serialized externally. + +// Performance considerations: +// Works well on sparse graphs, poorly on dense graphs. +// Extra information is maintained incrementally to detect cycles quickly. +// InsertEdge() is very fast when the edge already exists, and reasonably fast +// otherwise. +// FindPath() is linear in the size of the graph. +// The current implementation uses O(|V|+|E|) space. + +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + + // Opaque identifier for a graph node. + struct GraphId + { + uint64_t handle; + + bool operator==(const GraphId& x) const + { + return handle == x.handle; + } + bool operator!=(const GraphId& x) const + { + return handle != x.handle; + } + }; + + // Return an invalid graph id that will never be assigned by GraphCycles. 
+ inline GraphId InvalidGraphId() + { + return GraphId{0}; + } + + class GraphCycles + { + public: + GraphCycles(); + ~GraphCycles(); + + // Return the id to use for ptr, assigning one if necessary. + // Subsequent calls with the same ptr value will return the same id + // until Remove(). + GraphId GetId(void* ptr); + + // Remove "ptr" from the graph. Its corresponding node and all + // edges to and from it are removed. + void RemoveNode(void* ptr); + + // Return the pointer associated with id, or nullptr if id is not + // currently in the graph. + void* Ptr(GraphId id); + + // Attempt to insert an edge from source_node to dest_node. If the + // edge would introduce a cycle, return false without making any + // changes. Otherwise add the edge and return true. + bool InsertEdge(GraphId source_node, GraphId dest_node); + + // Remove any edge that exists from source_node to dest_node. + void RemoveEdge(GraphId source_node, GraphId dest_node); + + // Return whether node exists in the graph. + bool HasNode(GraphId node); + + // Return whether there is an edge directly from source_node to dest_node. + bool HasEdge(GraphId source_node, GraphId dest_node) const; + + // Return whether dest_node is reachable from source_node + // by following edges. + bool IsReachable(GraphId source_node, GraphId dest_node) const; + + // Find a path from "source" to "dest". If such a path exists, + // place the nodes on the path in the array path[], and return + // the number of nodes on the path. If the path is longer than + // max_path_len nodes, only the first max_path_len nodes are placed + // in path[]. The client should compare the return value with + // max_path_len" to see when this occurs. If no path exists, return + // 0. Any valid path stored in path[] will start with "source" and + // end with "dest". 
There is no guarantee that the path is the + // shortest, but no node will appear twice in the path, except the + // source and destination node if they are identical; therefore, the + // return value is at most one greater than the number of nodes in + // the graph. + int FindPath(GraphId source, GraphId dest, int max_path_len, GraphId path[]) const; + + // Update the stack trace recorded for id with the current stack + // trace if the last time it was updated had a smaller priority + // than the priority passed on this call. + // + // *get_stack_trace is called to get the stack trace. + void UpdateStackTrace(GraphId id, int priority, int (*get_stack_trace)(void**, int)); + + // Set *ptr to the beginning of the array that holds the recorded + // stack trace for id and return the depth of the stack trace. + int GetStackTrace(GraphId id, void*** ptr); + + // Check internal invariants. Crashes on failure, returns true on success. + // Expensive: should only be called from graphcycles_test.cc. + bool CheckInvariants() const; + + // ---------------------------------------------------- + struct Rep; + + private: + Rep* rep_; // opaque representation + GraphCycles(const GraphCycles&) = delete; + GraphCycles& operator=(const GraphCycles&) = delete; + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/kernel_timeout.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/kernel_timeout.h new file mode 100644 index 00000000..e65fdbe3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/kernel_timeout.h @@ -0,0 +1,202 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_ + +#ifndef _WIN32 +#include +#endif + +#include +#include // NOLINT(build/c++11) +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + + // An optional timeout, with nanosecond granularity. + // + // This is a private low-level API for use by a handful of low-level + // components. Higher-level components should build APIs based on + // absl::Time and absl::Duration. + class KernelTimeout + { + public: + // Construct an absolute timeout that should expire at `t`. + explicit KernelTimeout(absl::Time t); + + // Construct a relative timeout that should expire after `d`. + explicit KernelTimeout(absl::Duration d); + + // Infinite timeout. + constexpr KernelTimeout() : + rep_(kNoTimeout) + { + } + + // A more explicit factory for those who prefer it. + // Equivalent to `KernelTimeout()`. + static constexpr KernelTimeout Never() + { + return KernelTimeout(); + } + + // Returns true if there is a timeout that will eventually expire. + // Returns false if the timeout is infinite. + bool has_timeout() const + { + return rep_ != kNoTimeout; + } + + // If `has_timeout()` is true, returns true if the timeout was provided as an + // `absl::Time`. 
The return value is undefined if `has_timeout()` is false + // because all indefinite timeouts are equivalent. + bool is_absolute_timeout() const + { + return (rep_ & 1) == 0; + } + + // If `has_timeout()` is true, returns true if the timeout was provided as an + // `absl::Duration`. The return value is undefined if `has_timeout()` is false + // because all indefinite timeouts are equivalent. + bool is_relative_timeout() const + { + return (rep_ & 1) == 1; + } + + // Convert to `struct timespec` for interfaces that expect an absolute + // timeout. If !has_timeout() or is_relative_timeout(), attempts to convert to + // a reasonable absolute timeout, but callers should to test has_timeout() and + // is_relative_timeout() and prefer to use a more appropriate interface. + struct timespec MakeAbsTimespec() const; + + // Convert to `struct timespec` for interfaces that expect a relative + // timeout. If !has_timeout() or is_absolute_timeout(), attempts to convert to + // a reasonable relative timeout, but callers should to test has_timeout() and + // is_absolute_timeout() and prefer to use a more appropriate interface. Since + // the return value is a relative duration, it should be recomputed by calling + // this method in the case of a spurious wakeup. + struct timespec MakeRelativeTimespec() const; + +#ifndef _WIN32 + // Convert to `struct timespec` for interfaces that expect an absolute timeout + // on a specific clock `c`. This is similar to `MakeAbsTimespec()`, but + // callers usually want to use this method with `CLOCK_MONOTONIC` when + // relative timeouts are requested, and when the appropriate interface expects + // an absolute timeout relative to a specific clock (for example, + // pthread_cond_clockwait() or sem_clockwait()). If !has_timeout(), attempts + // to convert to a reasonable absolute timeout, but callers should to test + // has_timeout() prefer to use a more appropriate interface. 
+ struct timespec MakeClockAbsoluteTimespec(clockid_t c) const; +#endif + + // Convert to unix epoch nanos for interfaces that expect an absolute timeout + // in nanoseconds. If !has_timeout() or is_relative_timeout(), attempts to + // convert to a reasonable absolute timeout, but callers should to test + // has_timeout() and is_relative_timeout() and prefer to use a more + // appropriate interface. + int64_t MakeAbsNanos() const; + + // Converts to milliseconds from now, or INFINITE when + // !has_timeout(). For use by SleepConditionVariableSRW on + // Windows. Callers should recognize that the return value is a + // relative duration (it should be recomputed by calling this method + // in the case of a spurious wakeup). + // This header file may be included transitively by public header files, + // so we define our own DWORD and INFINITE instead of getting them from + // and . + typedef unsigned long DWord; // NOLINT + DWord InMillisecondsFromNow() const; + + // Convert to std::chrono::time_point for interfaces that expect an absolute + // timeout, like std::condition_variable::wait_until(). If !has_timeout() or + // is_relative_timeout(), attempts to convert to a reasonable absolute + // timeout, but callers should test has_timeout() and is_relative_timeout() + // and prefer to use a more appropriate interface. + std::chrono::time_point ToChronoTimePoint() const; + + // Convert to std::chrono::time_point for interfaces that expect a relative + // timeout, like std::condition_variable::wait_for(). If !has_timeout() or + // is_absolute_timeout(), attempts to convert to a reasonable relative + // timeout, but callers should test has_timeout() and is_absolute_timeout() + // and prefer to use a more appropriate interface. Since the return value is a + // relative duration, it should be recomputed by calling this method in the + // case of a spurious wakeup. 
+ std::chrono::nanoseconds ToChronoDuration() const; + + // Returns true if steady (aka monotonic) clocks are supported by the system. + // This method exists because go/btm requires synchronized clocks, and + // thus requires we use the system (aka walltime) clock. + static constexpr bool SupportsSteadyClock() + { + return true; + } + + private: + // Returns the current time, expressed as a count of nanoseconds since the + // epoch used by an arbitrary clock. The implementation tries to use a steady + // (monotonic) clock if one is available. + static int64_t SteadyClockNow(); + + // Internal representation. + // - If the value is kNoTimeout, then the timeout is infinite, and + // has_timeout() will return true. + // - If the low bit is 0, then the high 63 bits is the number of nanoseconds + // after the unix epoch. + // - If the low bit is 1, then the high 63 bits is the number of nanoseconds + // after the epoch used by SteadyClockNow(). + // + // In all cases the time is stored as an absolute time, the only difference is + // the clock epoch. The use of absolute times is important since in the case + // of a relative timeout with a spurious wakeup, the program would have to + // restart the wait, and thus needs a way of recomputing the remaining time. + uint64_t rep_; + + // Returns the number of nanoseconds stored in the internal representation. + // When combined with the clock epoch indicated by the low bit (which is + // accessed through is_absolute_timeout() and is_relative_timeout()), the + // return value is used to compute when the timeout should occur. + int64_t RawAbsNanos() const + { + return static_cast(rep_ >> 1); + } + + // Converts to nanoseconds from now. Since the return value is a relative + // duration, it should be recomputed by calling this method in the case of a + // spurious wakeup. + int64_t InNanosecondsFromNow() const; + + // A value that represents no timeout (or an infinite timeout). 
+ static constexpr uint64_t kNoTimeout = (std::numeric_limits::max)(); + + // The maximum value that can be stored in the high 63 bits. + static constexpr int64_t kMaxNanos = (std::numeric_limits::max)(); + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/per_thread_sem.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/per_thread_sem.h new file mode 100644 index 00000000..d4075b06 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/per_thread_sem.h @@ -0,0 +1,135 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// PerThreadSem is a low-level synchronization primitive controlling the +// runnability of a single thread, used internally by Mutex and CondVar. +// +// This is NOT a general-purpose synchronization mechanism, and should not be +// used directly by applications. Applications should use Mutex and CondVar. +// +// The semantics of PerThreadSem are the same as that of a counting semaphore. +// Each thread maintains an abstract "count" value associated with its identity. 
+ +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_ + +#include + +#include "absl/base/internal/thread_identity.h" +#include "absl/synchronization/internal/create_thread_identity.h" +#include "absl/synchronization/internal/kernel_timeout.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class Mutex; + + namespace synchronization_internal + { + + class PerThreadSem + { + public: + PerThreadSem() = delete; + PerThreadSem(const PerThreadSem&) = delete; + PerThreadSem& operator=(const PerThreadSem&) = delete; + + // Routine invoked periodically (once a second) by a background thread. + // Has no effect on user-visible state. + static void Tick(base_internal::ThreadIdentity* identity); + + // --------------------------------------------------------------------------- + // Routines used by autosizing threadpools to detect when threads are + // blocked. Each thread has a counter pointer, initially zero. If non-zero, + // the implementation atomically increments the counter when it blocks on a + // semaphore, a decrements it again when it wakes. This allows a threadpool + // to keep track of how many of its threads are blocked. + // SetThreadBlockedCounter() should be used only by threadpool + // implementations. GetThreadBlockedCounter() should be used by modules that + // block threads; if the pointer returned is non-zero, the location should be + // incremented before the thread blocks, and decremented after it wakes. + static void SetThreadBlockedCounter(std::atomic* counter); + static std::atomic* GetThreadBlockedCounter(); + + private: + // Create the PerThreadSem associated with "identity". Initializes count=0. + // REQUIRES: May only be called by ThreadIdentity. + static inline void Init(base_internal::ThreadIdentity* identity); + + // Increments "identity"'s count. + static inline void Post(base_internal::ThreadIdentity* identity); + + // Waits until either our count > 0 or t has expired. 
+ // If count > 0, decrements count and returns true. Otherwise returns false. + // !t.has_timeout() => Wait(t) will return true. + static inline bool Wait(KernelTimeout t); + + // Permitted callers. + friend class PerThreadSemTest; + friend class absl::Mutex; + friend void OneTimeInitThreadIdentity(absl::base_internal::ThreadIdentity*); + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +// In some build configurations we pass --detect-odr-violations to the +// gold linker. This causes it to flag weak symbol overrides as ODR +// violations. Because ODR only applies to C++ and not C, +// --detect-odr-violations ignores symbols not mangled with C++ names. +// By changing our extension points to be extern "C", we dodge this +// check. +extern "C" +{ + void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)( + absl::base_internal::ThreadIdentity* identity + ); + void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)( + absl::base_internal::ThreadIdentity* identity + ); + bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)( + absl::synchronization_internal::KernelTimeout t + ); + void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)( + absl::base_internal::ThreadIdentity* identity + ); +} // extern "C" + +void absl::synchronization_internal::PerThreadSem::Init( + absl::base_internal::ThreadIdentity* identity +) +{ + ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit) + (identity); +} + +void absl::synchronization_internal::PerThreadSem::Post( + absl::base_internal::ThreadIdentity* identity +) +{ + ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost) + (identity); +} + +bool absl::synchronization_internal::PerThreadSem::Wait( + absl::synchronization_internal::KernelTimeout t +) +{ + return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t); +} + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/pthread_waiter.h 
b/CAPI/cpp/grpc/include/absl/synchronization/internal/pthread_waiter.h new file mode 100644 index 00000000..bcef36b4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/pthread_waiter.h @@ -0,0 +1,63 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_ + +#ifndef _WIN32 +#include + +#include "absl/base/config.h" +#include "absl/synchronization/internal/kernel_timeout.h" +#include "absl/synchronization/internal/waiter_base.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + +#define ABSL_INTERNAL_HAVE_PTHREAD_WAITER 1 + + class PthreadWaiter : public WaiterCrtp + { + public: + PthreadWaiter(); + + bool Wait(KernelTimeout t); + void Post(); + void Poke(); + + static constexpr char kName[] = "PthreadWaiter"; + + private: + int TimedWait(KernelTimeout t); + + // REQUIRES: mu_ must be held. + void InternalCondVarPoke(); + + pthread_mutex_t mu_; + pthread_cond_t cv_; + int waiter_count_; + int wakeup_count_; // Unclaimed wakeups. 
+ }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ndef _WIN32 + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/sem_waiter.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/sem_waiter.h new file mode 100644 index 00000000..e900ebb1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/sem_waiter.h @@ -0,0 +1,68 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_SEMAPHORE_H +#include + +#include +#include + +#include "absl/base/internal/thread_identity.h" +#include "absl/synchronization/internal/futex.h" +#include "absl/synchronization/internal/kernel_timeout.h" +#include "absl/synchronization/internal/waiter_base.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + +#define ABSL_INTERNAL_HAVE_SEM_WAITER 1 + + class SemWaiter : public WaiterCrtp + { + public: + SemWaiter(); + + bool Wait(KernelTimeout t); + void Post(); + void Poke(); + + static constexpr char kName[] = "SemWaiter"; + + private: + int TimedWait(KernelTimeout t); + + sem_t sem_; + + // This seems superfluous, but for Poke() we need to cause spurious + // wakeups on the semaphore. 
Hence we can't actually use the + // semaphore's count. + std::atomic wakeups_; + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HAVE_SEMAPHORE_H + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/stdcpp_waiter.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/stdcpp_waiter.h new file mode 100644 index 00000000..8d01836e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/stdcpp_waiter.h @@ -0,0 +1,59 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_ + +#include // NOLINT(build/c++11) +#include // NOLINT(build/c++11) + +#include "absl/base/config.h" +#include "absl/synchronization/internal/kernel_timeout.h" +#include "absl/synchronization/internal/waiter_base.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + +#define ABSL_INTERNAL_HAVE_STDCPP_WAITER 1 + + class StdcppWaiter : public WaiterCrtp + { + public: + StdcppWaiter(); + + bool Wait(KernelTimeout t); + void Post(); + void Poke(); + + static constexpr char kName[] = "StdcppWaiter"; + + private: + // REQUIRES: mu_ must be held. 
+ void InternalCondVarPoke(); + + std::mutex mu_; + std::condition_variable cv_; + int waiter_count_; + int wakeup_count_; // Unclaimed wakeups. + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/thread_pool.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/thread_pool.h new file mode 100644 index 00000000..c783ce42 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/thread_pool.h @@ -0,0 +1,109 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_ + +#include +#include +#include +#include +#include // NOLINT(build/c++11) +#include +#include + +#include "absl/base/thread_annotations.h" +#include "absl/functional/any_invocable.h" +#include "absl/synchronization/mutex.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + + // A simple ThreadPool implementation for tests. 
+ class ThreadPool + { + public: + explicit ThreadPool(int num_threads) + { + threads_.reserve(num_threads); + for (int i = 0; i < num_threads; ++i) + { + threads_.push_back(std::thread(&ThreadPool::WorkLoop, this)); + } + } + + ThreadPool(const ThreadPool&) = delete; + ThreadPool& operator=(const ThreadPool&) = delete; + + ~ThreadPool() + { + { + absl::MutexLock l(&mu_); + for (size_t i = 0; i < threads_.size(); i++) + { + queue_.push(nullptr); // Shutdown signal. + } + } + for (auto& t : threads_) + { + t.join(); + } + } + + // Schedule a function to be run on a ThreadPool thread immediately. + void Schedule(absl::AnyInvocable func) + { + assert(func != nullptr); + absl::MutexLock l(&mu_); + queue_.push(std::move(func)); + } + + private: + bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) + { + return !queue_.empty(); + } + + void WorkLoop() + { + while (true) + { + absl::AnyInvocable func; + { + absl::MutexLock l(&mu_); + mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable)); + func = std::move(queue_.front()); + queue_.pop(); + } + if (func == nullptr) + { // Shutdown signal. + break; + } + func(); + } + } + + absl::Mutex mu_; + std::queue> queue_ ABSL_GUARDED_BY(mu_); + std::vector threads_; + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/waiter.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/waiter.h new file mode 100644 index 00000000..48db44db --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/waiter.h @@ -0,0 +1,71 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_ + +#include "absl/base/config.h" +#include "absl/synchronization/internal/futex_waiter.h" +#include "absl/synchronization/internal/pthread_waiter.h" +#include "absl/synchronization/internal/sem_waiter.h" +#include "absl/synchronization/internal/stdcpp_waiter.h" +#include "absl/synchronization/internal/win32_waiter.h" + +// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE= +#define ABSL_WAITER_MODE_FUTEX 0 +#define ABSL_WAITER_MODE_SEM 1 +#define ABSL_WAITER_MODE_CONDVAR 2 +#define ABSL_WAITER_MODE_WIN32 3 +#define ABSL_WAITER_MODE_STDCPP 4 + +#if defined(ABSL_FORCE_WAITER_MODE) +#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE +#elif defined(ABSL_INTERNAL_HAVE_WIN32_WAITER) +#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32 +#elif defined(ABSL_INTERNAL_HAVE_FUTEX_WAITER) +#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX +#elif defined(ABSL_INTERNAL_HAVE_SEM_WAITER) +#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM +#elif defined(ABSL_INTERNAL_HAVE_PTHREAD_WAITER) +#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR +#elif defined(ABSL_INTERNAL_HAVE_STDCPP_WAITER) +#define ABSL_WAITER_MODE ABSL_WAITER_MODE_STDCPP +#else +#error ABSL_WAITER_MODE is undefined +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + +#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX + using Waiter = FutexWaiter; +#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM + using Waiter = SemWaiter; +#elif ABSL_WAITER_MODE == 
ABSL_WAITER_MODE_CONDVAR + using Waiter = PthreadWaiter; +#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32 + using Waiter = Win32Waiter; +#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_STDCPP + using Waiter = StdcppWaiter; +#endif + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/waiter_base.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/waiter_base.h new file mode 100644 index 00000000..c1a73db7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/waiter_base.h @@ -0,0 +1,96 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_ + +#include "absl/base/config.h" +#include "absl/base/internal/thread_identity.h" +#include "absl/synchronization/internal/kernel_timeout.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + + // `Waiter` is a platform specific semaphore implementation that `PerThreadSem` + // waits on to implement blocking in `absl::Mutex`. Implementations should + // inherit from `WaiterCrtp` and must implement `Wait()`, `Post()`, and `Poke()` + // as described in `WaiterBase`. `waiter.h` selects the implementation and uses + // static-dispatch for performance. 
+ class WaiterBase + { + public: + WaiterBase() = default; + + // Not copyable or movable + WaiterBase(const WaiterBase&) = delete; + WaiterBase& operator=(const WaiterBase&) = delete; + + // Blocks the calling thread until a matching call to `Post()` or + // `t` has passed. Returns `true` if woken (`Post()` called), + // `false` on timeout. + // + // bool Wait(KernelTimeout t); + + // Restart the caller of `Wait()` as with a normal semaphore. + // + // void Post(); + + // If anyone is waiting, wake them up temporarily and cause them to + // call `MaybeBecomeIdle()`. They will then return to waiting for a + // `Post()` or timeout. + // + // void Poke(); + + // Returns the name of this implementation. Used only for debugging. + // + // static constexpr char kName[]; + + // How many periods to remain idle before releasing resources +#ifndef ABSL_HAVE_THREAD_SANITIZER + static constexpr int kIdlePeriods = 60; +#else + // Memory consumption under ThreadSanitizer is a serious concern, + // so we release resources sooner. The value of 1 leads to 1 to 2 second + // delay before marking a thread as idle. + static constexpr int kIdlePeriods = 1; +#endif + + protected: + static void MaybeBecomeIdle(); + }; + + template + class WaiterCrtp : public WaiterBase + { + public: + // Returns the Waiter associated with the identity. 
+ static T* GetWaiter(base_internal::ThreadIdentity* identity) + { + static_assert( + sizeof(T) <= sizeof(base_internal::ThreadIdentity::WaiterState), + "Insufficient space for Waiter" + ); + return reinterpret_cast(identity->waiter_state.data); + } + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/internal/win32_waiter.h b/CAPI/cpp/grpc/include/absl/synchronization/internal/win32_waiter.h new file mode 100644 index 00000000..bed5c822 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/internal/win32_waiter.h @@ -0,0 +1,73 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_ +#define ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_ + +#ifdef _WIN32 +#include +#endif + +#if defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA + +#include "absl/base/config.h" +#include "absl/synchronization/internal/kernel_timeout.h" +#include "absl/synchronization/internal/waiter_base.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace synchronization_internal + { + +#define ABSL_INTERNAL_HAVE_WIN32_WAITER 1 + + class Win32Waiter : public WaiterCrtp + { + public: + Win32Waiter(); + + bool Wait(KernelTimeout t); + void Post(); + void Poke(); + + static constexpr char kName[] = "Win32Waiter"; + + private: + // WinHelper - Used to define utilities for accessing the lock and + // condition variable storage once the types are complete. + class WinHelper; + + // REQUIRES: WinHelper::GetLock(this) must be held. + void InternalCondVarPoke(); + + // We can't include Windows.h in our headers, so we use aligned character + // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE. + // SRW locks and condition variables do not need to be explicitly destroyed. 
+ // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock + // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with + alignas(void*) unsigned char mu_storage_[sizeof(void*)]; + alignas(void*) unsigned char cv_storage_[sizeof(void*)]; + int waiter_count_; + int wakeup_count_; + }; + + } // namespace synchronization_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA + +#endif // ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/mutex.h b/CAPI/cpp/grpc/include/absl/synchronization/mutex.h new file mode 100644 index 00000000..79f5a6f2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/mutex.h @@ -0,0 +1,1222 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// mutex.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `Mutex` -- a mutually exclusive lock -- and the +// most common type of synchronization primitive for facilitating locks on +// shared resources. A mutex is used to prevent multiple threads from accessing +// and/or writing to a shared resource concurrently. 
+// +// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional +// features: +// * Conditional predicates intrinsic to the `Mutex` object +// * Shared/reader locks, in addition to standard exclusive/writer locks +// * Deadlock detection and debug support. +// +// The following helper classes are also defined within this file: +// +// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/ +// write access within the current scope. +// +// ReaderMutexLock +// - An RAII wrapper to acquire and release a `Mutex` for shared/read +// access within the current scope. +// +// WriterMutexLock +// - Effectively an alias for `MutexLock` above, designed for use in +// distinguishing reader and writer locks within code. +// +// In addition to simple mutex locks, this file also defines ways to perform +// locking under certain conditions. +// +// Condition - (Preferred) Used to wait for a particular predicate that +// depends on state protected by the `Mutex` to become true. +// CondVar - A lower-level variant of `Condition` that relies on +// application code to explicitly signal the `CondVar` when +// a condition has been met. +// +// See below for more information on using `Condition` or `CondVar`. +// +// Mutexes and mutex behavior can be quite complicated. The information within +// this header file is limited, as a result. Please consult the Mutex guide for +// more complete information and examples. 
+ +#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_ +#define ABSL_SYNCHRONIZATION_MUTEX_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/const_init.h" +#include "absl/base/internal/identity.h" +#include "absl/base/internal/low_level_alloc.h" +#include "absl/base/internal/thread_identity.h" +#include "absl/base/internal/tsan_mutex_interface.h" +#include "absl/base/port.h" +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/internal/kernel_timeout.h" +#include "absl/synchronization/internal/per_thread_sem.h" +#include "absl/time/time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class Condition; + struct SynchWaitParams; + + // ----------------------------------------------------------------------------- + // Mutex + // ----------------------------------------------------------------------------- + // + // A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock + // on some resource, typically a variable or data structure with associated + // invariants. Proper usage of mutexes prevents concurrent access by different + // threads to the same resource. + // + // A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`. + // The `Lock()` operation *acquires* a `Mutex` (in a state known as an + // *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a + // Mutex. During the span of time between the Lock() and Unlock() operations, + // a mutex is said to be *held*. By design, all mutexes support exclusive/write + // locks, as this is the most common way to use a mutex. + // + // Mutex operations are only allowed under certain conditions; otherwise an + // operation is "invalid", and disallowed by the API. The conditions concern + // both the current state of the mutex and the identity of the threads that + // are performing the operations. 
+ // + // The `Mutex` state machine for basic lock/unlock operations is quite simple: + // + // | | Lock() | Unlock() | + // |----------------+------------------------+----------| + // | Free | Exclusive | invalid | + // | Exclusive | blocks, then exclusive | Free | + // + // The full conditions are as follows. + // + // * Calls to `Unlock()` require that the mutex be held, and must be made in the + // same thread that performed the corresponding `Lock()` operation which + // acquired the mutex; otherwise the call is invalid. + // + // * The mutex being non-reentrant (or non-recursive) means that a call to + // `Lock()` or `TryLock()` must not be made in a thread that already holds the + // mutex; such a call is invalid. + // + // * In other words, the state of being "held" has both a temporal component + // (from `Lock()` until `Unlock()`) as well as a thread identity component: + // the mutex is held *by a particular thread*. + // + // An "invalid" operation has undefined behavior. The `Mutex` implementation + // is allowed to do anything on an invalid call, including, but not limited to, + // crashing with a useful error message, silently succeeding, or corrupting + // data structures. In debug mode, the implementation may crash with a useful + // error message. + // + // `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it + // is, however, approximately fair over long periods, and starvation-free for + // threads at the same priority. + // + // The lock/unlock primitives are now annotated with lock annotations + // defined in (base/thread_annotations.h). When writing multi-threaded code, + // you should use lock annotations whenever possible to document your lock + // synchronization policy. Besides acting as documentation, these annotations + // also help compilers or static analysis tools to identify and warn about + // issues that could potentially result in race conditions and deadlocks. 
+ // + // For more information about the lock annotations, please see + // [Thread Safety + // Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang + // documentation. + // + // See also `MutexLock`, below, for scoped `Mutex` acquisition. + + class ABSL_LOCKABLE Mutex + { + public: + // Creates a `Mutex` that is not held by anyone. This constructor is + // typically used for Mutexes allocated on the heap or the stack. + // + // To create `Mutex` instances with static storage duration + // (e.g. a namespace-scoped or global variable), see + // `Mutex::Mutex(absl::kConstInit)` below instead. + Mutex(); + + // Creates a mutex with static storage duration. A global variable + // constructed this way avoids the lifetime issues that can occur on program + // startup and shutdown. (See absl/base/const_init.h.) + // + // For Mutexes allocated on the heap and stack, instead use the default + // constructor, which can interact more fully with the thread sanitizer. + // + // Example usage: + // namespace foo { + // ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit); + // } + explicit constexpr Mutex(absl::ConstInitType); + + ~Mutex(); + + // Mutex::Lock() + // + // Blocks the calling thread, if necessary, until this `Mutex` is free, and + // then acquires it exclusively. (This lock is also known as a "write lock.") + void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(); + + // Mutex::Unlock() + // + // Releases this `Mutex` and returns it from the exclusive/write state to the + // free state. Calling thread must hold the `Mutex` exclusively. + void Unlock() ABSL_UNLOCK_FUNCTION(); + + // Mutex::TryLock() + // + // If the mutex can be acquired without blocking, does so exclusively and + // returns `true`. Otherwise, returns `false`. Returns `true` with high + // probability if the `Mutex` was free. + bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true); + + // Mutex::AssertHeld() + // + // Require that the mutex be held exclusively (write mode) by this thread. 
+ // + // If the mutex is not currently held by this thread, this function may report + // an error (typically by crashing with a diagnostic) or it may do nothing. + // This function is intended only as a tool to assist debugging; it doesn't + // guarantee correctness. + void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK(); + + // --------------------------------------------------------------------------- + // Reader-Writer Locking + // --------------------------------------------------------------------------- + + // A Mutex can also be used as a starvation-free reader-writer lock. + // Neither read-locks nor write-locks are reentrant/recursive to avoid + // potential client programming errors. + // + // The Mutex API provides `Writer*()` aliases for the existing `Lock()`, + // `Unlock()` and `TryLock()` methods for use within applications mixing + // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this + // manner can make locking behavior clearer when mixing read and write modes. + // + // Introducing reader locks necessarily complicates the `Mutex` state + // machine somewhat. The table below illustrates the allowed state transitions + // of a mutex in such cases. Note that ReaderLock() may block even if the lock + // is held in shared mode; this occurs when another thread is blocked on a + // call to WriterLock(). 
+ // + // --------------------------------------------------------------------------- + // Operation: WriterLock() Unlock() ReaderLock() ReaderUnlock() + // --------------------------------------------------------------------------- + // State + // --------------------------------------------------------------------------- + // Free Exclusive invalid Shared(1) invalid + // Shared(1) blocks invalid Shared(2) or blocks Free + // Shared(n) n>1 blocks invalid Shared(n+1) or blocks Shared(n-1) + // Exclusive blocks Free blocks invalid + // --------------------------------------------------------------------------- + // + // In comments below, "shared" refers to a state of Shared(n) for any n > 0. + + // Mutex::ReaderLock() + // + // Blocks the calling thread, if necessary, until this `Mutex` is either free, + // or in shared mode, and then acquires a share of it. Note that + // `ReaderLock()` will block if some other thread has an exclusive/writer lock + // on the mutex. + + void ReaderLock() ABSL_SHARED_LOCK_FUNCTION(); + + // Mutex::ReaderUnlock() + // + // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to + // the free state if this thread holds the last reader lock on the mutex. Note + // that you cannot call `ReaderUnlock()` on a mutex held in write mode. + void ReaderUnlock() ABSL_UNLOCK_FUNCTION(); + + // Mutex::ReaderTryLock() + // + // If the mutex can be acquired without blocking, acquires this mutex for + // shared access and returns `true`. Otherwise, returns `false`. Returns + // `true` with high probability if the `Mutex` was free or shared. + bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true); + + // Mutex::AssertReaderHeld() + // + // Require that the mutex be held at least in shared mode (read mode) by this + // thread. + // + // If the mutex is not currently held by this thread, this function may report + // an error (typically by crashing with a diagnostic) or it may do nothing. 
+ // This function is intended only as a tool to assist debugging; it doesn't + // guarantee correctness. + void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK(); + + // Mutex::WriterLock() + // Mutex::WriterUnlock() + // Mutex::WriterTryLock() + // + // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`. + // + // These methods may be used (along with the complementary `Reader*()` + // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`, + // etc.) from reader/writer lock usage. + void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() + { + this->Lock(); + } + + void WriterUnlock() ABSL_UNLOCK_FUNCTION() + { + this->Unlock(); + } + + bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) + { + return this->TryLock(); + } + + // --------------------------------------------------------------------------- + // Conditional Critical Regions + // --------------------------------------------------------------------------- + + // Conditional usage of a `Mutex` can occur using two distinct paradigms: + // + // * Use of `Mutex` member functions with `Condition` objects. + // * Use of the separate `CondVar` abstraction. + // + // In general, prefer use of `Condition` and the `Mutex` member functions + // listed below over `CondVar`. When there are multiple threads waiting on + // distinctly different conditions, however, a battery of `CondVar`s may be + // more efficient. This section discusses use of `Condition` objects. + // + // `Mutex` contains member functions for performing lock operations only under + // certain conditions, of class `Condition`. For correctness, the `Condition` + // must return a boolean that is a pure function, only of state protected by + // the `Mutex`. The condition must be invariant w.r.t. environmental state + // such as thread, cpu id, or time, and must be `noexcept`. 
The condition will + // always be invoked with the mutex held in at least read mode, so you should + // not block it for long periods or sleep it on a timer. + // + // Since a condition must not depend directly on the current time, use + // `*WithTimeout()` member function variants to make your condition + // effectively true after a given duration, or `*WithDeadline()` variants to + // make your condition effectively true after a given time. + // + // The condition function should have no side-effects aside from debug + // logging; as a special exception, the function may acquire other mutexes + // provided it releases all those that it acquires. (This exception was + // required to allow logging.) + + // Mutex::Await() + // + // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true` + // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the + // same mode in which it was previously held. If the condition is initially + // `true`, `Await()` *may* skip the release/re-acquire step. + // + // `Await()` requires that this thread holds this `Mutex` in some mode. + void Await(const Condition& cond); + + // Mutex::LockWhen() + // Mutex::ReaderLockWhen() + // Mutex::WriterLockWhen() + // + // Blocks until simultaneously both `cond` is `true` and this `Mutex` can + // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is + // logically equivalent to `*Lock(); Await();` though they may have different + // performance characteristics. 
+ void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(); + + void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION(); + + void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() + { + this->LockWhen(cond); + } + + // --------------------------------------------------------------------------- + // Mutex Variants with Timeouts/Deadlines + // --------------------------------------------------------------------------- + + // Mutex::AwaitWithTimeout() + // Mutex::AwaitWithDeadline() + // + // Unlocks this `Mutex` and blocks until simultaneously: + // - either `cond` is true or the {timeout has expired, deadline has passed} + // and + // - this `Mutex` can be reacquired, + // then reacquire this `Mutex` in the same mode in which it was previously + // held, returning `true` iff `cond` is `true` on return. + // + // If the condition is initially `true`, the implementation *may* skip the + // release/re-acquire step and return immediately. + // + // Deadlines in the past are equivalent to an immediate deadline. + // Negative timeouts are equivalent to a zero timeout. + // + // This method requires that this thread holds this `Mutex` in some mode. + bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout); + + bool AwaitWithDeadline(const Condition& cond, absl::Time deadline); + + // Mutex::LockWhenWithTimeout() + // Mutex::ReaderLockWhenWithTimeout() + // Mutex::WriterLockWhenWithTimeout() + // + // Blocks until simultaneously both: + // - either `cond` is `true` or the timeout has expired, and + // - this `Mutex` can be acquired, + // then atomically acquires this `Mutex`, returning `true` iff `cond` is + // `true` on return. + // + // Negative timeouts are equivalent to a zero timeout. 
+ bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) + ABSL_EXCLUSIVE_LOCK_FUNCTION(); + bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout) + ABSL_SHARED_LOCK_FUNCTION(); + bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout) + ABSL_EXCLUSIVE_LOCK_FUNCTION() + { + return this->LockWhenWithTimeout(cond, timeout); + } + + // Mutex::LockWhenWithDeadline() + // Mutex::ReaderLockWhenWithDeadline() + // Mutex::WriterLockWhenWithDeadline() + // + // Blocks until simultaneously both: + // - either `cond` is `true` or the deadline has been passed, and + // - this `Mutex` can be acquired, + // then atomically acquires this Mutex, returning `true` iff `cond` is `true` + // on return. + // + // Deadlines in the past are equivalent to an immediate deadline. + bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline) + ABSL_EXCLUSIVE_LOCK_FUNCTION(); + bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline) + ABSL_SHARED_LOCK_FUNCTION(); + bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline) + ABSL_EXCLUSIVE_LOCK_FUNCTION() + { + return this->LockWhenWithDeadline(cond, deadline); + } + + // --------------------------------------------------------------------------- + // Debug Support: Invariant Checking, Deadlock Detection, Logging. + // --------------------------------------------------------------------------- + + // Mutex::EnableInvariantDebugging() + // + // If `invariant`!=null and if invariant debugging has been enabled globally, + // cause `(*invariant)(arg)` to be called at moments when the invariant for + // this `Mutex` should hold (for example: just after acquire, just before + // release). + // + // The routine `invariant` should have no side-effects since it is not + // guaranteed how many times it will be called; it should check the invariant + // and crash if it does not hold. 
Enabling global invariant debugging may + // substantially reduce `Mutex` performance; it should be set only for + // non-production runs. Optimization options may also disable invariant + // checks. + void EnableInvariantDebugging(void (*invariant)(void*), void* arg); + + // Mutex::EnableDebugLog() + // + // Cause all subsequent uses of this `Mutex` to be logged via + // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous + // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made. + // + // Note: This method substantially reduces `Mutex` performance. + void EnableDebugLog(const char* name); + + // Deadlock detection + + // Mutex::ForgetDeadlockInfo() + // + // Forget any deadlock-detection information previously gathered + // about this `Mutex`. Call this method in debug mode when the lock ordering + // of a `Mutex` changes. + void ForgetDeadlockInfo(); + + // Mutex::AssertNotHeld() + // + // Return immediately if this thread does not hold this `Mutex` in any + // mode; otherwise, may report an error (typically by crashing with a + // diagnostic), or may return immediately. + // + // Currently this check is performed only if all of: + // - in debug mode + // - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort + // - number of locks concurrently held by this thread is not large. + // are true. + void AssertNotHeld() const; + + // Special cases. + + // A `MuHow` is a constant that indicates how a lock should be acquired. + // Internal implementation detail. Clients should ignore. + typedef const struct MuHowS* MuHow; + + // Mutex::InternalAttemptToUseMutexInFatalSignalHandler() + // + // Causes the `Mutex` implementation to prepare itself for re-entry caused by + // future use of `Mutex` within a fatal signal handler. This method is + // intended for use only for last-ditch attempts to log crash information. 
+ // It does not guarantee that attempts to use Mutexes within the handler will + // not deadlock; it merely makes other faults less likely. + // + // WARNING: This routine must be invoked from a signal handler, and the + // signal handler must either loop forever or terminate the process. + // Attempts to return from (or `longjmp` out of) the signal handler once this + // call has been made may cause arbitrary program behaviour including + // crashes and deadlocks. + static void InternalAttemptToUseMutexInFatalSignalHandler(); + + private: + std::atomic mu_; // The Mutex state. + + // Post()/Wait() versus associated PerThreadSem; in class for required + // friendship with PerThreadSem. + static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w); + static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w, synchronization_internal::KernelTimeout t); + + // slow path acquire + void LockSlowLoop(SynchWaitParams* waitp, int flags); + // wrappers around LockSlowLoop() + bool LockSlowWithDeadline(MuHow how, const Condition* cond, synchronization_internal::KernelTimeout t, int flags); + void LockSlow(MuHow how, const Condition* cond, int flags) ABSL_ATTRIBUTE_COLD; + // slow path release + void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD; + // Common code between Await() and AwaitWithTimeout/Deadline() + bool AwaitCommon(const Condition& cond, synchronization_internal::KernelTimeout t); + // Attempt to remove thread s from queue. + void TryRemove(base_internal::PerThreadSynch* s); + // Block a thread on mutex. + void Block(base_internal::PerThreadSynch* s); + // Wake a thread; return successor. + base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w); + + friend class CondVar; // for access to Trans()/Fer(). 
+ void Trans(MuHow how); // used for CondVar->Mutex transfer + void Fer( + base_internal::PerThreadSynch* w + ); // used for CondVar->Mutex transfer + + // Catch the error of writing Mutex when intending MutexLock. + explicit Mutex(const volatile Mutex* /*ignored*/) + { + } + + Mutex(const Mutex&) = delete; + Mutex& operator=(const Mutex&) = delete; + }; + + // ----------------------------------------------------------------------------- + // Mutex RAII Wrappers + // ----------------------------------------------------------------------------- + + // MutexLock + // + // `MutexLock` is a helper class, which acquires and releases a `Mutex` via + // RAII. + // + // Example: + // + // Class Foo { + // public: + // Foo::Bar* Baz() { + // MutexLock lock(&mu_); + // ... + // return bar; + // } + // + // private: + // Mutex mu_; + // }; + class ABSL_SCOPED_LOCKABLE MutexLock + { + public: + // Constructors + + // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is + // guaranteed to be locked when this object is constructed. Requires that + // `mu` be dereferenceable. + explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + this->mu_->Lock(); + } + + // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to + // the above, the condition given by `cond` is also guaranteed to hold when + // this object is constructed. 
+ explicit MutexLock(Mutex* mu, const Condition& cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + this->mu_->LockWhen(cond); + } + + MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex) + MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex) + MutexLock& operator=(const MutexLock&) = delete; + MutexLock& operator=(MutexLock&&) = delete; + + ~MutexLock() ABSL_UNLOCK_FUNCTION() + { + this->mu_->Unlock(); + } + + private: + Mutex* const mu_; + }; + + // ReaderMutexLock + // + // The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and + // releases a shared lock on a `Mutex` via RAII. + class ABSL_SCOPED_LOCKABLE ReaderMutexLock + { + public: + explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : + mu_(mu) + { + mu->ReaderLock(); + } + + explicit ReaderMutexLock(Mutex* mu, const Condition& cond) + ABSL_SHARED_LOCK_FUNCTION(mu) : + mu_(mu) + { + mu->ReaderLockWhen(cond); + } + + ReaderMutexLock(const ReaderMutexLock&) = delete; + ReaderMutexLock(ReaderMutexLock&&) = delete; + ReaderMutexLock& operator=(const ReaderMutexLock&) = delete; + ReaderMutexLock& operator=(ReaderMutexLock&&) = delete; + + ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() + { + this->mu_->ReaderUnlock(); + } + + private: + Mutex* const mu_; + }; + + // WriterMutexLock + // + // The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and + // releases a write (exclusive) lock on a `Mutex` via RAII. 
+ class ABSL_SCOPED_LOCKABLE WriterMutexLock + { + public: + explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + mu->WriterLock(); + } + + explicit WriterMutexLock(Mutex* mu, const Condition& cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + mu->WriterLockWhen(cond); + } + + WriterMutexLock(const WriterMutexLock&) = delete; + WriterMutexLock(WriterMutexLock&&) = delete; + WriterMutexLock& operator=(const WriterMutexLock&) = delete; + WriterMutexLock& operator=(WriterMutexLock&&) = delete; + + ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() + { + this->mu_->WriterUnlock(); + } + + private: + Mutex* const mu_; + }; + + // ----------------------------------------------------------------------------- + // Condition + // ----------------------------------------------------------------------------- + // + // `Mutex` contains a number of member functions which take a `Condition` as an + // argument; clients can wait for conditions to become `true` before attempting + // to acquire the mutex. These sections are known as "condition critical" + // sections. To use a `Condition`, you simply need to construct it, and use + // within an appropriate `Mutex` member function; everything else in the + // `Condition` class is an implementation detail. + // + // A `Condition` is specified as a function pointer which returns a boolean. + // `Condition` functions should be pure functions -- their results should depend + // only on passed arguments, should not consult any external state (such as + // clocks), and should have no side-effects, aside from debug logging. Any + // objects that the function may access should be limited to those which are + // constant while the mutex is blocked on the condition (e.g. a stack variable), + // or objects of state protected explicitly by the mutex. + // + // No matter which construction is used for `Condition`, the underlying + // function pointer / functor / callable must not throw any + // exceptions. 
Correctness of `Mutex` / `Condition` is not guaranteed in + // the face of a throwing `Condition`. (When Abseil is allowed to depend + // on C++17, these function pointers will be explicitly marked + // `noexcept`; until then this requirement cannot be enforced in the + // type system.) + // + // Note: to use a `Condition`, you need only construct it and pass it to a + // suitable `Mutex' member function, such as `Mutex::Await()`, or to the + // constructor of one of the scope guard classes. + // + // Example using LockWhen/Unlock: + // + // // assume count_ is not internal reference count + // int count_ ABSL_GUARDED_BY(mu_); + // Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_); + // + // mu_.LockWhen(count_is_zero); + // // ... + // mu_.Unlock(); + // + // Example using a scope guard: + // + // { + // MutexLock lock(&mu_, count_is_zero); + // // ... + // } + // + // When multiple threads are waiting on exactly the same condition, make sure + // that they are constructed with the same parameters (same pointer to function + // + arg, or same pointer to object + method), so that the mutex implementation + // can avoid redundantly evaluating the same condition for each thread. + class Condition + { + public: + // A Condition that returns the result of "(*func)(arg)" + Condition(bool (*func)(void*), void* arg); + + // Templated version for people who are averse to casts. + // + // To use a lambda, prepend it with unary plus, which converts the lambda + // into a function pointer: + // Condition(+[](T* t) { return ...; }, arg). + // + // Note: lambdas in this case must contain no bound variables. + // + // See class comment for performance advice. + template + Condition(bool (*func)(T*), T* arg); + + // Same as above, but allows for cases where `arg` comes from a pointer that + // is convertible to the function parameter type `T*` but not an exact match. 
+ // + // For example, the argument might be `X*` but the function takes `const X*`, + // or the argument might be `Derived*` while the function takes `Base*`, and + // so on for cases where the argument pointer can be implicitly converted. + // + // Implementation notes: This constructor overload is required in addition to + // the one above to allow deduction of `T` from `arg` for cases such as where + // a function template is passed as `func`. Also, the dummy `typename = void` + // template parameter exists just to work around a MSVC mangling bug. + template + Condition(bool (*func)(T*), typename absl::internal::identity::type* arg); + + // Templated version for invoking a method that returns a `bool`. + // + // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates + // `object->Method()`. + // + // Implementation Note: `absl::internal::identity` is used to allow methods to + // come from base classes. A simpler signature like + // `Condition(T*, bool (T::*)())` does not suffice. + template + Condition(T* object, bool (absl::internal::identity::type::*method)()); + + // Same as above, for const members + template + Condition(const T* object, bool (absl::internal::identity::type::*method)() const); + + // A Condition that returns the value of `*cond` + explicit Condition(const bool* cond); + + // Templated version for invoking a functor that returns a `bool`. + // This approach accepts pointers to non-mutable lambdas, `std::function`, + // the result of` std::bind` and user-defined functors that define + // `bool F::operator()() const`. + // + // Example: + // + // auto reached = [this, current]() { + // mu_.AssertReaderHeld(); // For annotalysis. 
+ // return processed_ >= current; + // }; + // mu_.Await(Condition(&reached)); + // + // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in + // the lambda as it may be called when the mutex is being unlocked from a + // scope holding only a reader lock, which will make the assertion not + // fulfilled and crash the binary. + + // See class comment for performance advice. In particular, if there + // might be more than one waiter for the same condition, make sure + // that all waiters construct the condition with the same pointers. + + // Implementation note: The second template parameter ensures that this + // constructor doesn't participate in overload resolution if T doesn't have + // `bool operator() const`. + template(&T::operator()))> + explicit Condition(const T* obj) : + Condition(obj, static_cast(&T::operator())) + { + } + + // A Condition that always returns `true`. + // kTrue is only useful in a narrow set of circumstances, mostly when + // it's passed conditionally. For example: + // + // mu.LockWhen(some_flag ? kTrue : SomeOtherCondition); + // + // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition + // don't return immediately when the timeout happens, they still block until + // the Mutex becomes available. The return value of these methods does + // not indicate if the timeout was reached; rather it indicates whether or + // not the condition is true. + ABSL_CONST_INIT static const Condition kTrue; + + // Evaluates the condition. + bool Eval() const; + + // Returns `true` if the two conditions are guaranteed to return the same + // value if evaluated at the same time, `false` if the evaluation *may* return + // different results. + // + // Two `Condition` values are guaranteed equal if both their `func` and `arg` + // components are the same. A null pointer is equivalent to a `true` + // condition. 
+ static bool GuaranteedEqual(const Condition* a, const Condition* b); + + private: + // Sizing an allocation for a method pointer can be subtle. In the Itanium + // specifications, a method pointer has a predictable, uniform size. On the + // other hand, MSVC ABI, method pointer sizes vary based on the + // inheritance of the class. Specifically, method pointers from classes with + // multiple inheritance are bigger than those of classes with single + // inheritance. Other variations also exist. + +#ifndef _MSC_VER + // Allocation for a function pointer or method pointer. + // The {0} initializer ensures that all unused bytes of this buffer are + // always zeroed out. This is necessary, because GuaranteedEqual() compares + // all of the bytes, unaware of which bytes are relevant to a given `eval_`. + using MethodPtr = bool (Condition::*)(); + char callback_[sizeof(MethodPtr)] = {0}; +#else + // It is well known that the larget MSVC pointer-to-member is 24 bytes. This + // may be the largest known pointer-to-member of any platform. For this + // reason we will allocate 24 bytes for MSVC platform toolchains. + char callback_[24] = {0}; +#endif + + // Function with which to evaluate callbacks and/or arguments. + bool (*eval_)(const Condition*) = nullptr; + + // Either an argument for a function call or an object for a method call. + void* arg_ = nullptr; + + // Various functions eval_ can point to: + static bool CallVoidPtrFunction(const Condition*); + template + static bool CastAndCallFunction(const Condition* c); + template + static bool CastAndCallMethod(const Condition* c); + + // Helper methods for storing, validating, and reading callback arguments. + template + inline void StoreCallback(T callback) + { + static_assert( + sizeof(callback) <= sizeof(callback_), + "An overlarge pointer was passed as a callback to Condition." 
+ ); + std::memcpy(callback_, &callback, sizeof(callback)); + } + + template + inline void ReadCallback(T* callback) const + { + std::memcpy(callback, callback_, sizeof(*callback)); + } + + // Used only to create kTrue. + constexpr Condition() = default; + }; + + // ----------------------------------------------------------------------------- + // CondVar + // ----------------------------------------------------------------------------- + // + // A condition variable, reflecting state evaluated separately outside of the + // `Mutex` object, which can be signaled to wake callers. + // This class is not normally needed; use `Mutex` member functions such as + // `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases + // with many threads and many conditions, `CondVar` may be faster. + // + // The implementation may deliver signals to any condition variable at + // any time, even when no call to `Signal()` or `SignalAll()` is made; as a + // result, upon being awoken, you must check the logical condition you have + // been waiting upon. + // + // Examples: + // + // Usage for a thread waiting for some condition C protected by mutex mu: + // mu.Lock(); + // while (!C) { cv->Wait(&mu); } // releases and reacquires mu + // // C holds; process data + // mu.Unlock(); + // + // Usage to wake T is: + // mu.Lock(); + // // process data, possibly establishing C + // if (C) { cv->Signal(); } + // mu.Unlock(); + // + // If C may be useful to more than one waiter, use `SignalAll()` instead of + // `Signal()`. + // + // With this implementation it is efficient to use `Signal()/SignalAll()` inside + // the locked region; this usage can make reasoning about your program easier. + // + class CondVar + { + public: + // A `CondVar` allocated on the heap or on the stack can use the this + // constructor. + CondVar(); + ~CondVar(); + + // CondVar::Wait() + // + // Atomically releases a `Mutex` and blocks on this condition variable. 
+ // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a + // spurious wakeup), then reacquires the `Mutex` and returns. + // + // Requires and ensures that the current thread holds the `Mutex`. + void Wait(Mutex* mu); + + // CondVar::WaitWithTimeout() + // + // Atomically releases a `Mutex` and blocks on this condition variable. + // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a + // spurious wakeup), or until the timeout has expired, then reacquires + // the `Mutex` and returns. + // + // Returns true if the timeout has expired without this `CondVar` + // being signalled in any manner. If both the timeout has expired + // and this `CondVar` has been signalled, the implementation is free + // to return `true` or `false`. + // + // Requires and ensures that the current thread holds the `Mutex`. + bool WaitWithTimeout(Mutex* mu, absl::Duration timeout); + + // CondVar::WaitWithDeadline() + // + // Atomically releases a `Mutex` and blocks on this condition variable. + // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a + // spurious wakeup), or until the deadline has passed, then reacquires + // the `Mutex` and returns. + // + // Deadlines in the past are equivalent to an immediate deadline. + // + // Returns true if the deadline has passed without this `CondVar` + // being signalled in any manner. If both the deadline has passed + // and this `CondVar` has been signalled, the implementation is free + // to return `true` or `false`. + // + // Requires and ensures that the current thread holds the `Mutex`. + bool WaitWithDeadline(Mutex* mu, absl::Time deadline); + + // CondVar::Signal() + // + // Signal this `CondVar`; wake at least one waiter if one exists. + void Signal(); + + // CondVar::SignalAll() + // + // Signal this `CondVar`; wake all waiters. + void SignalAll(); + + // CondVar::EnableDebugLog() + // + // Causes all subsequent uses of this `CondVar` to be logged via + // `ABSL_RAW_LOG(INFO)`. 
Log entries are tagged with `name` if `name != 0`. + // Note: this method substantially reduces `CondVar` performance. + void EnableDebugLog(const char* name); + + private: + bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t); + void Remove(base_internal::PerThreadSynch* s); + void Wakeup(base_internal::PerThreadSynch* w); + std::atomic cv_; // Condition variable state. + CondVar(const CondVar&) = delete; + CondVar& operator=(const CondVar&) = delete; + }; + + // Variants of MutexLock. + // + // If you find yourself using one of these, consider instead using + // Mutex::Unlock() and/or if-statements for clarity. + + // MutexLockMaybe + // + // MutexLockMaybe is like MutexLock, but is a no-op when mu is null. + class ABSL_SCOPED_LOCKABLE MutexLockMaybe + { + public: + explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + if (this->mu_ != nullptr) + { + this->mu_->Lock(); + } + } + + explicit MutexLockMaybe(Mutex* mu, const Condition& cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + if (this->mu_ != nullptr) + { + this->mu_->LockWhen(cond); + } + } + + ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() + { + if (this->mu_ != nullptr) + { + this->mu_->Unlock(); + } + } + + private: + Mutex* const mu_; + MutexLockMaybe(const MutexLockMaybe&) = delete; + MutexLockMaybe(MutexLockMaybe&&) = delete; + MutexLockMaybe& operator=(const MutexLockMaybe&) = delete; + MutexLockMaybe& operator=(MutexLockMaybe&&) = delete; + }; + + // ReleasableMutexLock + // + // ReleasableMutexLock is like MutexLock, but permits `Release()` of its + // mutex before destruction. `Release()` may be called at most once. 
+ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock + { + public: + explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + this->mu_->Lock(); + } + + explicit ReleasableMutexLock(Mutex* mu, const Condition& cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : + mu_(mu) + { + this->mu_->LockWhen(cond); + } + + ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() + { + if (this->mu_ != nullptr) + { + this->mu_->Unlock(); + } + } + + void Release() ABSL_UNLOCK_FUNCTION(); + + private: + Mutex* mu_; + ReleasableMutexLock(const ReleasableMutexLock&) = delete; + ReleasableMutexLock(ReleasableMutexLock&&) = delete; + ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete; + ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete; + }; + + inline Mutex::Mutex() : + mu_(0) + { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); + } + + inline constexpr Mutex::Mutex(absl::ConstInitType) : + mu_(0) + { + } + + inline CondVar::CondVar() : + cv_(0) + { + } + + // static + template + bool Condition::CastAndCallMethod(const Condition* c) + { + T* object = static_cast(c->arg_); + bool (T::*method_pointer)(); + c->ReadCallback(&method_pointer); + return (object->*method_pointer)(); + } + + // static + template + bool Condition::CastAndCallFunction(const Condition* c) + { + bool (*function)(T*); + c->ReadCallback(&function); + T* argument = static_cast(c->arg_); + return (*function)(argument); + } + + template + inline Condition::Condition(bool (*func)(T*), T* arg) : + eval_(&CastAndCallFunction), + arg_(const_cast(static_cast(arg))) + { + static_assert(sizeof(&func) <= sizeof(callback_), "An overlarge function pointer was passed to Condition."); + StoreCallback(func); + } + + template + inline Condition::Condition(bool (*func)(T*), typename absl::internal::identity::type* arg) + // Just delegate to the overload above. 
+ : + Condition(func, arg) + { + } + + template + inline Condition::Condition(T* object, bool (absl::internal::identity::type::*method)()) : + eval_(&CastAndCallMethod), + arg_(object) + { + static_assert(sizeof(&method) <= sizeof(callback_), "An overlarge method pointer was passed to Condition."); + StoreCallback(method); + } + + template + inline Condition::Condition(const T* object, bool (absl::internal::identity::type::*method)() const) : + eval_(&CastAndCallMethod), + arg_(reinterpret_cast(const_cast(object))) + { + StoreCallback(method); + } + + // Register hooks for profiling support. + // + // The function pointer registered here will be called whenever a mutex is + // contended. The callback is given the cycles for which waiting happened (as + // measured by //absl/base/internal/cycleclock.h, and which may not + // be real "cycle" counts.) + // + // There is no ordering guarantee between when the hook is registered and when + // callbacks will begin. Only a single profiler can be installed in a running + // binary; if this function is called a second time with a different function + // pointer, the value is ignored (and will cause an assertion failure in debug + // mode.) + void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)); + + // Register a hook for Mutex tracing. + // + // The function pointer registered here will be called whenever a mutex is + // contended. The callback is given an opaque handle to the contended mutex, + // an event name, and the number of wait cycles (as measured by + // //absl/base/internal/cycleclock.h, and which may not be real + // "cycle" counts.) + // + // The only event name currently sent is "slow release". + // + // This has the same ordering and single-use limitations as + // RegisterMutexProfiler() above. + void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj, int64_t wait_cycles)); + + // Register a hook for CondVar tracing. 
+ // + // The function pointer registered here will be called here on various CondVar + // events. The callback is given an opaque handle to the CondVar object and + // a string identifying the event. This is thread-safe, but only a single + // tracer can be registered. + // + // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and + // "SignalAll wakeup". + // + // This has the same ordering and single-use limitations as + // RegisterMutexProfiler() above. + void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)); + + // EnableMutexInvariantDebugging() + // + // Enable or disable global support for Mutex invariant debugging. If enabled, + // then invariant predicates can be registered per-Mutex for debug checking. + // See Mutex::EnableInvariantDebugging(). + void EnableMutexInvariantDebugging(bool enabled); + + // When in debug mode, and when the feature has been enabled globally, the + // implementation will keep track of lock ordering and complain (or optionally + // crash) if a cycle is detected in the acquired-before graph. + + // Possible modes of operation for the deadlock detector in debug mode. + enum class OnDeadlockCycle + { + kIgnore, // Neither report on nor attempt to track cycles in lock ordering + kReport, // Report lock cycles to stderr when detected + kAbort, // Report lock cycles to stderr when detected, then abort + }; + + // SetMutexDeadlockDetectionMode() + // + // Enable or disable global support for detection of potential deadlocks + // due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of + // lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph + // will be maintained internally, and detected cycles will be reported in + // the manner chosen here. + void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode); + + ABSL_NAMESPACE_END +} // namespace absl + +// In some build configurations we pass --detect-odr-violations to the +// gold linker. 
This causes it to flag weak symbol overrides as ODR +// violations. Because ODR only applies to C++ and not C, +// --detect-odr-violations ignores symbols not mangled with C++ names. +// By changing our extension points to be extern "C", we dodge this +// check. +extern "C" +{ + void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)(); +} // extern "C" + +#endif // ABSL_SYNCHRONIZATION_MUTEX_H_ diff --git a/CAPI/cpp/grpc/include/absl/synchronization/notification.h b/CAPI/cpp/grpc/include/absl/synchronization/notification.h new file mode 100644 index 00000000..d07df334 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/synchronization/notification.h @@ -0,0 +1,134 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// notification.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `Notification` abstraction, which allows threads +// to receive notification of a single occurrence of a single event. +// +// The `Notification` object maintains a private boolean "notified" state that +// transitions to `true` at most once. The `Notification` class provides the +// following primary member functions: +// * `HasBeenNotified()` to query its state +// * `WaitForNotification*()` to have threads wait until the "notified" state +// is `true`. 
+// * `Notify()` to set the notification's "notified" state to `true` and +// notify all waiting threads that the event has occurred. +// This method may only be called once. +// +// Note that while `Notify()` may only be called once, it is perfectly valid to +// call any of the `WaitForNotification*()` methods multiple times, from +// multiple threads -- even after the notification's "notified" state has been +// set -- in which case those methods will immediately return. +// +// Note that the lifetime of a `Notification` requires careful consideration; +// it might not be safe to destroy a notification after calling `Notify()` since +// it is still legal for other threads to call `WaitForNotification*()` methods +// on the notification. However, observers responding to a "notified" state of +// `true` can safely delete the notification without interfering with the call +// to `Notify()` in the other thread. +// +// Memory ordering: For any threads X and Y, if X calls `Notify()`, then any +// action taken by X before it calls `Notify()` is visible to thread Y after: +// * Y returns from `WaitForNotification()`, or +// * Y receives a `true` return value from either `HasBeenNotified()` or +// `WaitForNotificationWithTimeout()`. + +#ifndef ABSL_SYNCHRONIZATION_NOTIFICATION_H_ +#define ABSL_SYNCHRONIZATION_NOTIFICATION_H_ + +#include + +#include "absl/base/attributes.h" +#include "absl/synchronization/mutex.h" +#include "absl/time/time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // Notification + // ----------------------------------------------------------------------------- + class Notification + { + public: + // Initializes the "notified" state to unnotified. 
+ Notification() : + notified_yet_(false) + { + } + explicit Notification(bool prenotify) : + notified_yet_(prenotify) + { + } + Notification(const Notification&) = delete; + Notification& operator=(const Notification&) = delete; + ~Notification(); + + // Notification::HasBeenNotified() + // + // Returns the value of the notification's internal "notified" state. + ABSL_MUST_USE_RESULT bool HasBeenNotified() const + { + return HasBeenNotifiedInternal(&this->notified_yet_); + } + + // Notification::WaitForNotification() + // + // Blocks the calling thread until the notification's "notified" state is + // `true`. Note that if `Notify()` has been previously called on this + // notification, this function will immediately return. + void WaitForNotification() const; + + // Notification::WaitForNotificationWithTimeout() + // + // Blocks until either the notification's "notified" state is `true` (which + // may occur immediately) or the timeout has elapsed, returning the value of + // its "notified" state in either case. + bool WaitForNotificationWithTimeout(absl::Duration timeout) const; + + // Notification::WaitForNotificationWithDeadline() + // + // Blocks until either the notification's "notified" state is `true` (which + // may occur immediately) or the deadline has expired, returning the value of + // its "notified" state in either case. + bool WaitForNotificationWithDeadline(absl::Time deadline) const; + + // Notification::Notify() + // + // Sets the "notified" state of this notification to `true` and wakes waiting + // threads. Note: do not call `Notify()` multiple times on the same + // `Notification`; calling `Notify()` more than once on the same notification + // results in undefined behavior. 
+ void Notify(); + + private: + static inline bool HasBeenNotifiedInternal( + const std::atomic* notified_yet + ) + { + return notified_yet->load(std::memory_order_acquire); + } + + mutable Mutex mutex_; + std::atomic notified_yet_; // written under mutex_ + }; + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_SYNCHRONIZATION_NOTIFICATION_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/civil_time.h b/CAPI/cpp/grpc/include/absl/time/civil_time.h new file mode 100644 index 00000000..ef327541 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/civil_time.h @@ -0,0 +1,582 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: civil_time.h +// ----------------------------------------------------------------------------- +// +// This header file defines abstractions for computing with "civil time". +// The term "civil time" refers to the legally recognized human-scale time +// that is represented by the six fields `YYYY-MM-DD hh:mm:ss`. A "date" +// is perhaps the most common example of a civil time (represented here as +// an `absl::CivilDay`). +// +// Modern-day civil time follows the Gregorian Calendar and is a +// time-zone-independent concept: a civil time of "2015-06-01 12:00:00", for +// example, is not tied to a time zone. 
Put another way, a civil time does not +// map to a unique point in time; a civil time must be mapped to an absolute +// time *through* a time zone. +// +// Because a civil time is what most people think of as "time," it is common to +// map absolute times to civil times to present to users. +// +// Time zones define the relationship between absolute and civil times. Given an +// absolute or civil time and a time zone, you can compute the other time: +// +// Civil Time = F(Absolute Time, Time Zone) +// Absolute Time = G(Civil Time, Time Zone) +// +// The Abseil time library allows you to construct such civil times from +// absolute times; consult time.h for such functionality. +// +// This library provides six classes for constructing civil-time objects, and +// provides several helper functions for rounding, iterating, and performing +// arithmetic on civil-time objects, while avoiding complications like +// daylight-saving time (DST): +// +// * `absl::CivilSecond` +// * `absl::CivilMinute` +// * `absl::CivilHour` +// * `absl::CivilDay` +// * `absl::CivilMonth` +// * `absl::CivilYear` +// +// Example: +// +// // Construct a civil-time object for a specific day +// const absl::CivilDay cd(1969, 07, 20); +// +// // Construct a civil-time object for a specific second +// const absl::CivilSecond cd(2018, 8, 1, 12, 0, 1); +// +// Note: In C++14 and later, this library is usable in a constexpr context. 
+// +// Example: +// +// // Valid in C++14 +// constexpr absl::CivilDay cd(1969, 07, 20); + +#ifndef ABSL_TIME_CIVIL_TIME_H_ +#define ABSL_TIME_CIVIL_TIME_H_ + +#include <iosfwd> +#include <string> + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" +#include "absl/time/internal/cctz/include/cctz/civil_time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace time_internal + { + struct second_tag : cctz::detail::second_tag + { + }; + struct minute_tag : second_tag, cctz::detail::minute_tag + { + }; + struct hour_tag : minute_tag, cctz::detail::hour_tag + { + }; + struct day_tag : hour_tag, cctz::detail::day_tag + { + }; + struct month_tag : day_tag, cctz::detail::month_tag + { + }; + struct year_tag : month_tag, cctz::detail::year_tag + { + }; + } // namespace time_internal + + // ----------------------------------------------------------------------------- + // CivilSecond, CivilMinute, CivilHour, CivilDay, CivilMonth, CivilYear + // ----------------------------------------------------------------------------- + // + // Each of these civil-time types is a simple value type with the same + // interface for construction and the same six accessors for each of the civil + // time fields (year, month, day, hour, minute, and second, aka YMDHMS). These + // classes differ only in their alignment, which is indicated by the type name + // and specifies the field on which arithmetic operates. + // + // CONSTRUCTION + // + // Each of the civil-time types can be constructed in two ways: by directly + // passing to the constructor up to six integers representing the YMDHMS fields, + // or by copying the YMDHMS fields from a differently aligned civil-time type. + // Omitted fields are assigned their minimum valid value. Hours, minutes, and + // seconds will be set to 0, month and day will be set to 1. Since there is no + // minimum year, the default is 1970. 
+ // + // Examples: + // + // absl::CivilDay default_value; // 1970-01-01 00:00:00 + // + // absl::CivilDay a(2015, 2, 3); // 2015-02-03 00:00:00 + // absl::CivilDay b(2015, 2, 3, 4, 5, 6); // 2015-02-03 00:00:00 + // absl::CivilDay c(2015); // 2015-01-01 00:00:00 + // + // absl::CivilSecond ss(2015, 2, 3, 4, 5, 6); // 2015-02-03 04:05:06 + // absl::CivilMinute mm(ss); // 2015-02-03 04:05:00 + // absl::CivilHour hh(mm); // 2015-02-03 04:00:00 + // absl::CivilDay d(hh); // 2015-02-03 00:00:00 + // absl::CivilMonth m(d); // 2015-02-01 00:00:00 + // absl::CivilYear y(m); // 2015-01-01 00:00:00 + // + // m = absl::CivilMonth(y); // 2015-01-01 00:00:00 + // d = absl::CivilDay(m); // 2015-01-01 00:00:00 + // hh = absl::CivilHour(d); // 2015-01-01 00:00:00 + // mm = absl::CivilMinute(hh); // 2015-01-01 00:00:00 + // ss = absl::CivilSecond(mm); // 2015-01-01 00:00:00 + // + // Each civil-time class is aligned to the civil-time field indicated in the + // class's name after normalization. Alignment is performed by setting all the + // inferior fields to their minimum valid value (as described above). The + // following are examples of how each of the six types would align the fields + // representing November 22, 2015 at 12:34:56 in the afternoon. (Note: the + // string format used here is not important; it's just a shorthand way of + // showing the six YMDHMS fields.) + // + // absl::CivilSecond : 2015-11-22 12:34:56 + // absl::CivilMinute : 2015-11-22 12:34:00 + // absl::CivilHour : 2015-11-22 12:00:00 + // absl::CivilDay : 2015-11-22 00:00:00 + // absl::CivilMonth : 2015-11-01 00:00:00 + // absl::CivilYear : 2015-01-01 00:00:00 + // + // Each civil-time type performs arithmetic on the field to which it is + // aligned. This means that adding 1 to an absl::CivilDay increments the day + // field (normalizing as necessary), and subtracting 7 from an absl::CivilMonth + // operates on the month field (normalizing as necessary). All arithmetic + // produces a valid civil time. 
Difference requires two similarly aligned + // civil-time objects and returns the scalar answer in units of the objects' + // alignment. For example, the difference between two absl::CivilHour objects + // will give an answer in units of civil hours. + // + // ALIGNMENT CONVERSION + // + // The alignment of a civil-time object cannot change, but the object may be + // used to construct a new object with a different alignment. This is referred + // to as "realigning". When realigning to a type with the same or more + // precision (e.g., absl::CivilDay -> absl::CivilSecond), the conversion may be + // performed implicitly since no information is lost. However, if information + // could be discarded (e.g., CivilSecond -> CivilDay), the conversion must + // be explicit at the call site. + // + // Examples: + // + // void UseDay(absl::CivilDay day); + // + // absl::CivilSecond cs; + // UseDay(cs); // Won't compile because data may be discarded + // UseDay(absl::CivilDay(cs)); // OK: explicit conversion + // + // absl::CivilDay cd; + // UseDay(cd); // OK: no conversion needed + // + // absl::CivilMonth cm; + // UseDay(cm); // OK: implicit conversion to absl::CivilDay + // + // NORMALIZATION + // + // Normalization takes invalid values and adjusts them to produce valid values. + // Within the civil-time library, integer arguments passed to the Civil* + // constructors may be out-of-range, in which case they are normalized by + // carrying overflow into a field of courser granularity to produce valid + // civil-time objects. This normalization enables natural arithmetic on + // constructor arguments without worrying about the field's range. 
+ // + // Examples: + // + // // Out-of-range; normalized to 2016-11-01 + // absl::CivilDay d(2016, 10, 32); + // // Out-of-range, negative: normalized to 2016-10-30T23 + // absl::CivilHour h1(2016, 10, 31, -1); + // // Normalization is cumulative: normalized to 2016-10-30T23 + // absl::CivilHour h2(2016, 10, 32, -25); + // + // Note: If normalization is undesired, you can signal an error by comparing + // the constructor arguments to the normalized values returned by the YMDHMS + // properties. + // + // COMPARISON + // + // Comparison between civil-time objects considers all six YMDHMS fields, + // regardless of the type's alignment. Comparison between differently aligned + // civil-time types is allowed. + // + // Examples: + // + // absl::CivilDay feb_3(2015, 2, 3); // 2015-02-03 00:00:00 + // absl::CivilDay mar_4(2015, 3, 4); // 2015-03-04 00:00:00 + // // feb_3 < mar_4 + // // absl::CivilYear(feb_3) == absl::CivilYear(mar_4) + // + // absl::CivilSecond feb_3_noon(2015, 2, 3, 12, 0, 0); // 2015-02-03 12:00:00 + // // feb_3 < feb_3_noon + // // feb_3 == absl::CivilDay(feb_3_noon) + // + // // Iterates all the days of February 2015. + // for (absl::CivilDay d(2015, 2, 1); d < absl::CivilMonth(2015, 3); ++d) { + // // ... + // } + // + // ARITHMETIC + // + // Civil-time types support natural arithmetic operators such as addition, + // subtraction, and difference. Arithmetic operates on the civil-time field + // indicated in the type's name. Difference operators require arguments with + // the same alignment and return the answer in units of the alignment. + // + // Example: + // + // absl::CivilDay a(2015, 2, 3); + // ++a; // 2015-02-04 00:00:00 + // --a; // 2015-02-03 00:00:00 + // absl::CivilDay b = a + 1; // 2015-02-04 00:00:00 + // absl::CivilDay c = 1 + b; // 2015-02-05 00:00:00 + // int n = c - a; // n = 2 (civil days) + // int m = c - absl::CivilMonth(c); // Won't compile: different types. 
+ // + // ACCESSORS + // + // Each civil-time type has accessors for all six of the civil-time fields: + // year, month, day, hour, minute, and second. + // + // civil_year_t year() + // int month() + // int day() + // int hour() + // int minute() + // int second() + // + // Recall that fields inferior to the type's alignment will be set to their + // minimum valid value. + // + // Example: + // + // absl::CivilDay d(2015, 6, 28); + // // d.year() == 2015 + // // d.month() == 6 + // // d.day() == 28 + // // d.hour() == 0 + // // d.minute() == 0 + // // d.second() == 0 + // + // CASE STUDY: Adding a month to January 31. + // + // One of the classic questions that arises when considering a civil time + // library (or a date library or a date/time library) is this: + // "What is the result of adding a month to January 31?" + // This is an interesting question because it is unclear what is meant by a + // "month", and several different answers are possible, depending on context: + // + // 1. March 3 (or 2 if a leap year), if "add a month" means to add a month to + // the current month, and adjust the date to overflow the extra days into + // March. In this case the result of "February 31" would be normalized as + // within the civil-time library. + // 2. February 28 (or 29 if a leap year), if "add a month" means to add a + // month, and adjust the date while holding the resulting month constant. + // In this case, the result of "February 31" would be truncated to the last + // day in February. + // 3. An error. The caller may get some error, an exception, an invalid date + // object, or perhaps return `false`. This may make sense because there is + // no single unambiguously correct answer to the question. + // + // Practically speaking, any answer that is not what the programmer intended + // is the wrong answer. + // + // The Abseil time library avoids this problem by making it impossible to + // ask ambiguous questions. 
All civil-time objects are aligned to a particular + // civil-field boundary (such as aligned to a year, month, day, hour, minute, + // or second), and arithmetic operates on the field to which the object is + // aligned. This means that in order to "add a month" the object must first be + // aligned to a month boundary, which is equivalent to the first day of that + // month. + // + // Of course, there are ways to compute an answer the question at hand using + // this Abseil time library, but they require the programmer to be explicit + // about the answer they expect. To illustrate, let's see how to compute all + // three of the above possible answers to the question of "Jan 31 plus 1 + // month": + // + // Example: + // + // const absl::CivilDay d(2015, 1, 31); + // + // // Answer 1: + // // Add 1 to the month field in the constructor, and rely on normalization. + // const auto normalized = absl::CivilDay(d.year(), d.month() + 1, d.day()); + // // normalized == 2015-03-03 (aka Feb 31) + // + // // Answer 2: + // // Add 1 to month field, capping to the end of next month. + // const auto next_month = absl::CivilMonth(d) + 1; + // const auto last_day_of_next_month = absl::CivilDay(next_month + 1) - 1; + // const auto capped = std::min(normalized, last_day_of_next_month); + // // capped == 2015-02-28 + // + // // Answer 3: + // // Signal an error if the normalized answer is not in next month. + // if (absl::CivilMonth(normalized) != next_month) { + // // error, month overflow + // } + // + using CivilSecond = + time_internal::cctz::detail::civil_time<time_internal::second_tag>; + using CivilMinute = + time_internal::cctz::detail::civil_time<time_internal::minute_tag>; + using CivilHour = + time_internal::cctz::detail::civil_time<time_internal::hour_tag>; + using CivilDay = + time_internal::cctz::detail::civil_time<time_internal::day_tag>; + using CivilMonth = + time_internal::cctz::detail::civil_time<time_internal::month_tag>; + using CivilYear = + time_internal::cctz::detail::civil_time<time_internal::year_tag>; + + // civil_year_t + // + // Type alias of a civil-time year value. 
This type is guaranteed to (at least) + // support any year value supported by `time_t`. + // + // Example: + // + // absl::CivilSecond cs = ...; + // absl::civil_year_t y = cs.year(); + // cs = absl::CivilSecond(y, 1, 1, 0, 0, 0); // CivilSecond(CivilYear(cs)) + // + using civil_year_t = time_internal::cctz::year_t; + + // civil_diff_t + // + // Type alias of the difference between two civil-time values. + // This type is used to indicate arguments that are not + // normalized (such as parameters to the civil-time constructors), the results + // of civil-time subtraction, or the operand to civil-time addition. + // + // Example: + // + // absl::civil_diff_t n_sec = cs1 - cs2; // cs1 == cs2 + n_sec; + // + using civil_diff_t = time_internal::cctz::diff_t; + + // Weekday::monday, Weekday::tuesday, Weekday::wednesday, Weekday::thursday, + // Weekday::friday, Weekday::saturday, Weekday::sunday + // + // The Weekday enum class represents the civil-time concept of a "weekday" with + // members for all days of the week. + // + // absl::Weekday wd = absl::Weekday::thursday; + // + using Weekday = time_internal::cctz::weekday; + + // GetWeekday() + // + // Returns the absl::Weekday for the given (realigned) civil-time value. + // + // Example: + // + // absl::CivilDay a(2015, 8, 13); + // absl::Weekday wd = absl::GetWeekday(a); // wd == absl::Weekday::thursday + // + inline Weekday GetWeekday(CivilSecond cs) + { + return time_internal::cctz::get_weekday(cs); + } + + // NextWeekday() + // PrevWeekday() + // + // Returns the absl::CivilDay that strictly follows or precedes a given + // absl::CivilDay, and that falls on the given absl::Weekday. 
+ // + // Example, given the following month: + // + // August 2015 + // Su Mo Tu We Th Fr Sa + // 1 + // 2 3 4 5 6 7 8 + // 9 10 11 12 13 14 15 + // 16 17 18 19 20 21 22 + // 23 24 25 26 27 28 29 + // 30 31 + // + // absl::CivilDay a(2015, 8, 13); + // // absl::GetWeekday(a) == absl::Weekday::thursday + // absl::CivilDay b = absl::NextWeekday(a, absl::Weekday::thursday); + // // b = 2015-08-20 + // absl::CivilDay c = absl::PrevWeekday(a, absl::Weekday::thursday); + // // c = 2015-08-06 + // + // absl::CivilDay d = ... + // // Gets the following Thursday if d is not already Thursday + // absl::CivilDay thurs1 = absl::NextWeekday(d - 1, absl::Weekday::thursday); + // // Gets the previous Thursday if d is not already Thursday + // absl::CivilDay thurs2 = absl::PrevWeekday(d + 1, absl::Weekday::thursday); + // + inline CivilDay NextWeekday(CivilDay cd, Weekday wd) + { + return CivilDay(time_internal::cctz::next_weekday(cd, wd)); + } + inline CivilDay PrevWeekday(CivilDay cd, Weekday wd) + { + return CivilDay(time_internal::cctz::prev_weekday(cd, wd)); + } + + // GetYearDay() + // + // Returns the day-of-year for the given (realigned) civil-time value. 
+ // + // Example: + // + // absl::CivilDay a(2015, 1, 1); + // int yd_jan_1 = absl::GetYearDay(a); // yd_jan_1 = 1 + // absl::CivilDay b(2015, 12, 31); + // int yd_dec_31 = absl::GetYearDay(b); // yd_dec_31 = 365 + // + inline int GetYearDay(CivilSecond cs) + { + return time_internal::cctz::get_yearday(cs); + } + + // FormatCivilTime() + // + // Formats the given civil-time value into a string value of the following + // format: + // + // Type | Format + // --------------------------------- + // CivilSecond | YYYY-MM-DDTHH:MM:SS + // CivilMinute | YYYY-MM-DDTHH:MM + // CivilHour | YYYY-MM-DDTHH + // CivilDay | YYYY-MM-DD + // CivilMonth | YYYY-MM + // CivilYear | YYYY + // + // Example: + // + // absl::CivilDay d = absl::CivilDay(1969, 7, 20); + // std::string day_string = absl::FormatCivilTime(d); // "1969-07-20" + // + std::string FormatCivilTime(CivilSecond c); + std::string FormatCivilTime(CivilMinute c); + std::string FormatCivilTime(CivilHour c); + std::string FormatCivilTime(CivilDay c); + std::string FormatCivilTime(CivilMonth c); + std::string FormatCivilTime(CivilYear c); + + // absl::ParseCivilTime() + // + // Parses a civil-time value from the specified `absl::string_view` into the + // passed output parameter. Returns `true` upon successful parsing. + // + // The expected form of the input string is as follows: + // + // Type | Format + // --------------------------------- + // CivilSecond | YYYY-MM-DDTHH:MM:SS + // CivilMinute | YYYY-MM-DDTHH:MM + // CivilHour | YYYY-MM-DDTHH + // CivilDay | YYYY-MM-DD + // CivilMonth | YYYY-MM + // CivilYear | YYYY + // + // Example: + // + // absl::CivilDay d; + // bool ok = absl::ParseCivilTime("2018-01-02", &d); // OK + // + // Note that parsing will fail if the string's format does not match the + // expected type exactly. `ParseLenientCivilTime()` below is more lenient. 
+ // + bool ParseCivilTime(absl::string_view s, CivilSecond* c); + bool ParseCivilTime(absl::string_view s, CivilMinute* c); + bool ParseCivilTime(absl::string_view s, CivilHour* c); + bool ParseCivilTime(absl::string_view s, CivilDay* c); + bool ParseCivilTime(absl::string_view s, CivilMonth* c); + bool ParseCivilTime(absl::string_view s, CivilYear* c); + + // ParseLenientCivilTime() + // + // Parses any of the formats accepted by `absl::ParseCivilTime()`, but is more + // lenient if the format of the string does not exactly match the associated + // type. + // + // Example: + // + // absl::CivilDay d; + // bool ok = absl::ParseLenientCivilTime("1969-07-20", &d); // OK + // ok = absl::ParseLenientCivilTime("1969-07-20T10", &d); // OK: T10 floored + // ok = absl::ParseLenientCivilTime("1969-07", &d); // OK: day defaults to 1 + // + bool ParseLenientCivilTime(absl::string_view s, CivilSecond* c); + bool ParseLenientCivilTime(absl::string_view s, CivilMinute* c); + bool ParseLenientCivilTime(absl::string_view s, CivilHour* c); + bool ParseLenientCivilTime(absl::string_view s, CivilDay* c); + bool ParseLenientCivilTime(absl::string_view s, CivilMonth* c); + bool ParseLenientCivilTime(absl::string_view s, CivilYear* c); + + namespace time_internal + { // For functions found via ADL on civil-time tags. + + // Streaming Operators + // + // Each civil-time type may be sent to an output stream using operator<<(). + // The result matches the string produced by `FormatCivilTime()`. 
+ // + // Example: + // + // absl::CivilDay d = absl::CivilDay(1969, 7, 20); + // std::cout << "Date is: " << d << "\n"; + // + std::ostream& operator<<(std::ostream& os, CivilYear y); + std::ostream& operator<<(std::ostream& os, CivilMonth m); + std::ostream& operator<<(std::ostream& os, CivilDay d); + std::ostream& operator<<(std::ostream& os, CivilHour h); + std::ostream& operator<<(std::ostream& os, CivilMinute m); + std::ostream& operator<<(std::ostream& os, CivilSecond s); + + // AbslParseFlag() + // + // Parses the command-line flag string representation `s` into a civil-time + // value. Flags must be specified in a format that is valid for + // `absl::ParseLenientCivilTime()`. + bool AbslParseFlag(absl::string_view s, CivilSecond* c, std::string* error); + bool AbslParseFlag(absl::string_view s, CivilMinute* c, std::string* error); + bool AbslParseFlag(absl::string_view s, CivilHour* c, std::string* error); + bool AbslParseFlag(absl::string_view s, CivilDay* c, std::string* error); + bool AbslParseFlag(absl::string_view s, CivilMonth* c, std::string* error); + bool AbslParseFlag(absl::string_view s, CivilYear* c, std::string* error); + + // AbslUnparseFlag() + // + // Unparses a civil-time value into a command-line string representation using + // the format specified by `absl::ParseCivilTime()`. + std::string AbslUnparseFlag(CivilSecond c); + std::string AbslUnparseFlag(CivilMinute c); + std::string AbslUnparseFlag(CivilHour c); + std::string AbslUnparseFlag(CivilDay c); + std::string AbslUnparseFlag(CivilMonth c); + std::string AbslUnparseFlag(CivilYear c); + + } // namespace time_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_CIVIL_TIME_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/clock.h b/CAPI/cpp/grpc/include/absl/time/clock.h new file mode 100644 index 00000000..459d1132 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/clock.h @@ -0,0 +1,78 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: clock.h +// ----------------------------------------------------------------------------- +// +// This header file contains utility functions for working with the system-wide +// realtime clock. For descriptions of the main time abstractions used within +// this header file, consult the time.h header file. +#ifndef ABSL_TIME_CLOCK_H_ +#define ABSL_TIME_CLOCK_H_ + +#include "absl/base/macros.h" +#include "absl/time/time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Now() + // + // Returns the current time, expressed as an `absl::Time` absolute time value. + absl::Time Now(); + + // GetCurrentTimeNanos() + // + // Returns the current time, expressed as a count of nanoseconds since the Unix + // Epoch (https://en.wikipedia.org/wiki/Unix_time). Prefer `absl::Now()` instead + // for all but the most performance-sensitive cases (i.e. when you are calling + // this function hundreds of thousands of times per second). + int64_t GetCurrentTimeNanos(); + + // SleepFor() + // + // Sleeps for the specified duration, expressed as an `absl::Duration`. + // + // Notes: + // * Signal interruptions will not reduce the sleep duration. + // * Returns immediately when passed a nonpositive duration. 
+ void SleepFor(absl::Duration duration); + + ABSL_NAMESPACE_END +} // namespace absl + +// ----------------------------------------------------------------------------- +// Implementation Details +// ----------------------------------------------------------------------------- + +// In some build configurations we pass --detect-odr-violations to the +// gold linker. This causes it to flag weak symbol overrides as ODR +// violations. Because ODR only applies to C++ and not C, +// --detect-odr-violations ignores symbols not mangled with C++ names. +// By changing our extension points to be extern "C", we dodge this +// check. +extern "C" +{ + void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(absl::Duration duration); +} // extern "C" + +inline void absl::SleepFor(absl::Duration duration) +{ + ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor) + (duration); +} + +#endif // ABSL_TIME_CLOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time.h new file mode 100644 index 00000000..8d6aa9fd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time.h @@ -0,0 +1,335 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_H_ +#define ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_H_ + +#include "absl/base/config.h" +#include "absl/time/internal/cctz/include/cctz/civil_time_detail.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // The term "civil time" refers to the legally recognized human-scale time + // that is represented by the six fields YYYY-MM-DD hh:mm:ss. Modern-day civil + // time follows the Gregorian Calendar and is a time-zone-independent concept. + // A "date" is perhaps the most common example of a civil time (represented in + // this library as cctz::civil_day). This library provides six classes and a + // handful of functions that help with rounding, iterating, and arithmetic on + // civil times while avoiding complications like daylight-saving time (DST). + // + // The following six classes form the core of this civil-time library: + // + // * civil_second + // * civil_minute + // * civil_hour + // * civil_day + // * civil_month + // * civil_year + // + // Each class is a simple value type with the same interface for construction + // and the same six accessors for each of the civil fields (year, month, day, + // hour, minute, and second, aka YMDHMS). These classes differ only in their + // alignment, which is indicated by the type name and specifies the field on + // which arithmetic operates. + // + // Each class can be constructed by passing up to six optional integer + // arguments representing the YMDHMS fields (in that order) to the + // constructor. Omitted fields are assigned their minimum valid value. Hours, + // minutes, and seconds will be set to 0, month and day will be set to 1, and + // since there is no minimum valid year, it will be set to 1970. So, a + // default-constructed civil-time object will have YMDHMS fields representing + // "1970-01-01 00:00:00". 
Fields that are out-of-range are normalized (e.g., + // October 32 -> November 1) so that all civil-time objects represent valid + // values. + // + // Each civil-time class is aligned to the civil-time field indicated in the + // class's name after normalization. Alignment is performed by setting all the + // inferior fields to their minimum valid value (as described above). The + // following are examples of how each of the six types would align the fields + // representing November 22, 2015 at 12:34:56 in the afternoon. (Note: the + // string format used here is not important; it's just a shorthand way of + // showing the six YMDHMS fields.) + // + // civil_second 2015-11-22 12:34:56 + // civil_minute 2015-11-22 12:34:00 + // civil_hour 2015-11-22 12:00:00 + // civil_day 2015-11-22 00:00:00 + // civil_month 2015-11-01 00:00:00 + // civil_year 2015-01-01 00:00:00 + // + // Each civil-time type performs arithmetic on the field to which it is + // aligned. This means that adding 1 to a civil_day increments the day field + // (normalizing as necessary), and subtracting 7 from a civil_month operates + // on the month field (normalizing as necessary). All arithmetic produces a + // valid civil time. Difference requires two similarly aligned civil-time + // objects and returns the scalar answer in units of the objects' alignment. + // For example, the difference between two civil_hour objects will give an + // answer in units of civil hours. + // + // In addition to the six civil-time types just described, there are + // a handful of helper functions and algorithms for performing common + // calculations. These are described below. + // + // Note: In C++14 and later, this library is usable in a constexpr context. 
+ // + // CONSTRUCTION: + // + // Each of the civil-time types can be constructed in two ways: by directly + // passing to the constructor up to six (optional) integers representing the + // YMDHMS fields, or by copying the YMDHMS fields from a differently aligned + // civil-time type. + // + // civil_day default_value; // 1970-01-01 00:00:00 + // + // civil_day a(2015, 2, 3); // 2015-02-03 00:00:00 + // civil_day b(2015, 2, 3, 4, 5, 6); // 2015-02-03 00:00:00 + // civil_day c(2015); // 2015-01-01 00:00:00 + // + // civil_second ss(2015, 2, 3, 4, 5, 6); // 2015-02-03 04:05:06 + // civil_minute mm(ss); // 2015-02-03 04:05:00 + // civil_hour hh(mm); // 2015-02-03 04:00:00 + // civil_day d(hh); // 2015-02-03 00:00:00 + // civil_month m(d); // 2015-02-01 00:00:00 + // civil_year y(m); // 2015-01-01 00:00:00 + // + // m = civil_month(y); // 2015-01-01 00:00:00 + // d = civil_day(m); // 2015-01-01 00:00:00 + // hh = civil_hour(d); // 2015-01-01 00:00:00 + // mm = civil_minute(hh); // 2015-01-01 00:00:00 + // ss = civil_second(mm); // 2015-01-01 00:00:00 + // + // ALIGNMENT CONVERSION: + // + // The alignment of a civil-time object cannot change, but the object may be + // used to construct a new object with a different alignment. This is referred + // to as "realigning". When realigning to a type with the same or more + // precision (e.g., civil_day -> civil_second), the conversion may be + // performed implicitly since no information is lost. However, if information + // could be discarded (e.g., civil_second -> civil_day), the conversion must + // be explicit at the call site. 
+ // + // void fun(const civil_day& day); + // + // civil_second cs; + // fun(cs); // Won't compile because data may be discarded + // fun(civil_day(cs)); // OK: explicit conversion + // + // civil_day cd; + // fun(cd); // OK: no conversion needed + // + // civil_month cm; + // fun(cm); // OK: implicit conversion to civil_day + // + // NORMALIZATION: + // + // Integer arguments passed to the constructor may be out-of-range, in which + // case they are normalized to produce a valid civil-time object. This enables + // natural arithmetic on constructor arguments without worrying about the + // field's range. Normalization guarantees that there are no invalid + // civil-time objects. + // + // civil_day d(2016, 10, 32); // Out-of-range day; normalized to 2016-11-01 + // + // Note: If normalization is undesired, you can signal an error by comparing + // the constructor arguments to the normalized values returned by the YMDHMS + // properties. + // + // PROPERTIES: + // + // All civil-time types have accessors for all six of the civil-time fields: + // year, month, day, hour, minute, and second. Recall that fields inferior to + // the type's alignment will be set to their minimum valid value. + // + // civil_day d(2015, 6, 28); + // // d.year() == 2015 + // // d.month() == 6 + // // d.day() == 28 + // // d.hour() == 0 + // // d.minute() == 0 + // // d.second() == 0 + // + // COMPARISON: + // + // Comparison always considers all six YMDHMS fields, regardless of the type's + // alignment. Comparison between differently aligned civil-time types is + // allowed. + // + // civil_day feb_3(2015, 2, 3); // 2015-02-03 00:00:00 + // civil_day mar_4(2015, 3, 4); // 2015-03-04 00:00:00 + // // feb_3 < mar_4 + // // civil_year(feb_3) == civil_year(mar_4) + // + // civil_second feb_3_noon(2015, 2, 3, 12, 0, 0); // 2015-02-03 12:00:00 + // // feb_3 < feb_3_noon + // // feb_3 == civil_day(feb_3_noon) + // + // // Iterates all the days of February 2015. 
+ // for (civil_day d(2015, 2, 1); d < civil_month(2015, 3); ++d) { + // // ... + // } + // + // STREAMING: + // + // Each civil-time type may be sent to an output stream using operator<<(). + // The output format follows the pattern "YYYY-MM-DDThh:mm:ss" where fields + // inferior to the type's alignment are omitted. + // + // civil_second cs(2015, 2, 3, 4, 5, 6); + // std::cout << cs << "\n"; // Outputs: 2015-02-03T04:05:06 + // + // civil_day cd(cs); + // std::cout << cd << "\n"; // Outputs: 2015-02-03 + // + // civil_year cy(cs); + // std::cout << cy << "\n"; // Outputs: 2015 + // + // ARITHMETIC: + // + // Civil-time types support natural arithmetic operators such as addition, + // subtraction, and difference. Arithmetic operates on the civil-time field + // indicated in the type's name. Difference requires arguments with the same + // alignment and returns the answer in units of the alignment. + // + // civil_day a(2015, 2, 3); + // ++a; // 2015-02-04 00:00:00 + // --a; // 2015-02-03 00:00:00 + // civil_day b = a + 1; // 2015-02-04 00:00:00 + // civil_day c = 1 + b; // 2015-02-05 00:00:00 + // int n = c - a; // n = 2 (civil days) + // int m = c - civil_month(c); // Won't compile: different types. + // + // EXAMPLE: Adding a month to January 31. + // + // One of the classic questions that arises when considering a civil-time + // library (or a date library or a date/time library) is this: "What happens + // when you add a month to January 31?" This is an interesting question + // because there could be a number of possible answers: + // + // 1. March 3 (or 2 if a leap year). This may make sense if the operation + // wants the equivalent of February 31. + // 2. February 28 (or 29 if a leap year). This may make sense if the operation + // wants the last day of January to go to the last day of February. + // 3. Error. The caller may get some error, an exception, an invalid date + // object, or maybe false is returned. 
//    This may make sense because there is
//    no single unambiguously correct answer to the question.
//
// Practically speaking, any answer that is not what the programmer intended
// is the wrong answer.
//
// This civil-time library avoids the problem by making it impossible to ask
// ambiguous questions. All civil-time objects are aligned to a particular
// civil-field boundary (such as aligned to a year, month, day, hour, minute,
// or second), and arithmetic operates on the field to which the object is
// aligned. This means that in order to "add a month" the object must first be
// aligned to a month boundary, which is equivalent to the first day of that
// month.
//
// Of course, there are ways to compute an answer to the question at hand
// using this civil-time library, but they require the programmer to be
// explicit about the answer they expect. To illustrate, let's see how to
// compute all three of the above possible answers to the question of "Jan 31
// plus 1 month":
//
//   const civil_day d(2015, 1, 31);
//
//   // Answer 1:
//   // Add 1 to the month field in the constructor, and rely on normalization.
//   const auto ans_normalized = civil_day(d.year(), d.month() + 1, d.day());
//   // ans_normalized == 2015-03-03 (aka Feb 31)
//
//   // Answer 2:
//   // Add 1 to month field, capping to the end of next month.
//   const auto next_month = civil_month(d) + 1;
//   const auto last_day_of_next_month = civil_day(next_month + 1) - 1;
//   const auto ans_capped = std::min(ans_normalized, last_day_of_next_month);
//   // ans_capped == 2015-02-28
//
//   // Answer 3:
//   // Signal an error if the normalized answer is not in next month.
+ // if (civil_month(ans_normalized) != next_month) { + // // error, month overflow + // } + // + using civil_year = detail::civil_year; + using civil_month = detail::civil_month; + using civil_day = detail::civil_day; + using civil_hour = detail::civil_hour; + using civil_minute = detail::civil_minute; + using civil_second = detail::civil_second; + + // An enum class with members monday, tuesday, wednesday, thursday, friday, + // saturday, and sunday. These enum values may be sent to an output stream + // using operator<<(). The result is the full weekday name in English with a + // leading capital letter. + // + // weekday wd = weekday::thursday; + // std::cout << wd << "\n"; // Outputs: Thursday + // + using detail::weekday; + + // Returns the weekday for the given civil-time value. + // + // civil_day a(2015, 8, 13); + // weekday wd = get_weekday(a); // wd == weekday::thursday + // + using detail::get_weekday; + + // Returns the civil_day that strictly follows or precedes the given + // civil_day, and that falls on the given weekday. + // + // For example, given: + // + // August 2015 + // Su Mo Tu We Th Fr Sa + // 1 + // 2 3 4 5 6 7 8 + // 9 10 11 12 13 14 15 + // 16 17 18 19 20 21 22 + // 23 24 25 26 27 28 29 + // 30 31 + // + // civil_day a(2015, 8, 13); // get_weekday(a) == weekday::thursday + // civil_day b = next_weekday(a, weekday::thursday); // b = 2015-08-20 + // civil_day c = prev_weekday(a, weekday::thursday); // c = 2015-08-06 + // + // civil_day d = ... + // // Gets the following Thursday if d is not already Thursday + // civil_day thurs1 = next_weekday(d - 1, weekday::thursday); + // // Gets the previous Thursday if d is not already Thursday + // civil_day thurs2 = prev_weekday(d + 1, weekday::thursday); + // + using detail::next_weekday; + using detail::prev_weekday; + + // Returns the day-of-year for the given civil-time value. 
+ // + // civil_day a(2015, 1, 1); + // int yd_jan_1 = get_yearday(a); // yd_jan_1 = 1 + // civil_day b(2015, 12, 31); + // int yd_dec_31 = get_yearday(b); // yd_dec_31 = 365 + // + using detail::get_yearday; + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time_detail.h new file mode 100644 index 00000000..d696b427 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time_detail.h @@ -0,0 +1,832 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_DETAIL_H_ +#define ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_DETAIL_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" + +// Disable constexpr support unless we are in C++14 mode. 
#if __cpp_constexpr >= 201304 || (defined(_MSC_VER) && _MSC_VER >= 1910)
#define CONSTEXPR_D constexpr  // data
#define CONSTEXPR_F constexpr  // function
#define CONSTEXPR_M constexpr  // member
#else
#define CONSTEXPR_D const
#define CONSTEXPR_F inline
#define CONSTEXPR_M
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {

// Support years that at least span the range of 64-bit time_t values.
using year_t = std::int_fast64_t;

// Type alias that indicates an argument is not normalized (e.g., the
// constructor parameters and operands/results of addition/subtraction).
using diff_t = std::int_fast64_t;

namespace detail {

// Type aliases that indicate normalized argument values.
using month_t = std::int_fast8_t;   // [1:12]
using day_t = std::int_fast8_t;     // [1:31]
using hour_t = std::int_fast8_t;    // [0:23]
using minute_t = std::int_fast8_t;  // [0:59]
using second_t = std::int_fast8_t;  // [0:59]

// Normalized civil-time fields: Y-M-D HH:MM:SS.
struct fields {
  CONSTEXPR_M fields(year_t year, month_t month, day_t day, hour_t hour,
                     minute_t minute, second_t second)
      : y(year), m(month), d(day), hh(hour), mm(minute), ss(second) {}
  std::int_least64_t y;
  std::int_least8_t m;
  std::int_least8_t d;
  std::int_least8_t hh;
  std::int_least8_t mm;
  std::int_least8_t ss;
};

// Alignment tags for the civil_time class template. The inheritance chain
// (second <- minute <- hour <- day <- month <- year) is what lets
// std::is_base_of<> decide which alignment conversions preserve data.
struct second_tag {};
struct minute_tag : second_tag {};
struct hour_tag : minute_tag {};
struct day_tag : hour_tag {};
struct month_tag : day_tag {};
struct year_tag : month_tag {};

////////////////////////////////////////////////////////////////////////

// Field normalization (without avoidable overflow).
+ + namespace impl + { + + CONSTEXPR_F bool is_leap_year(year_t y) noexcept + { + return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0); + } + CONSTEXPR_F int year_index(year_t y, month_t m) noexcept + { + const int yi = static_cast((y + (m > 2)) % 400); + return yi < 0 ? yi + 400 : yi; + } + CONSTEXPR_F int days_per_century(int yi) noexcept + { + return 36524 + (yi == 0 || yi > 300); + } + CONSTEXPR_F int days_per_4years(int yi) noexcept + { + return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96); + } + CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept + { + return is_leap_year(y + (m > 2)) ? 366 : 365; + } + CONSTEXPR_F int days_per_month(year_t y, month_t m) noexcept + { + CONSTEXPR_D int k_days_per_month[1 + 12] = { + -1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 // non leap year + }; + return k_days_per_month[m] + (m == 2 && is_leap_year(y)); + } + + CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh, minute_t mm, second_t ss) noexcept + { + year_t ey = y % 400; + const year_t oey = ey; + ey += (cd / 146097) * 400; + cd %= 146097; + if (cd < 0) + { + ey -= 400; + cd += 146097; + } + ey += (d / 146097) * 400; + d = d % 146097 + cd; + if (d > 0) + { + if (d > 146097) + { + ey += 400; + d -= 146097; + } + } + else + { + if (d > -365) + { + // We often hit the previous year when stepping a civil time backwards, + // so special case it to avoid counting up by 100/4/1-year chunks. + ey -= 1; + d += days_per_year(ey, m); + } + else + { + ey -= 400; + d += 146097; + } + } + if (d > 365) + { + int yi = year_index(ey, m); // Index into Gregorian 400 year cycle. 
+ for (;;) + { + int n = days_per_century(yi); + if (d <= n) + break; + d -= n; + ey += 100; + yi += 100; + if (yi >= 400) + yi -= 400; + } + for (;;) + { + int n = days_per_4years(yi); + if (d <= n) + break; + d -= n; + ey += 4; + yi += 4; + if (yi >= 400) + yi -= 400; + } + for (;;) + { + int n = days_per_year(ey, m); + if (d <= n) + break; + d -= n; + ++ey; + } + } + if (d > 28) + { + for (;;) + { + int n = days_per_month(ey, m); + if (d <= n) + break; + d -= n; + if (++m > 12) + { + ++ey; + m = 1; + } + } + } + return fields(y + (ey - oey), m, static_cast(d), hh, mm, ss); + } + CONSTEXPR_F fields n_mon(year_t y, diff_t m, diff_t d, diff_t cd, hour_t hh, minute_t mm, second_t ss) noexcept + { + if (m != 12) + { + y += m / 12; + m %= 12; + if (m <= 0) + { + y -= 1; + m += 12; + } + } + return n_day(y, static_cast(m), d, cd, hh, mm, ss); + } + CONSTEXPR_F fields n_hour(year_t y, diff_t m, diff_t d, diff_t cd, diff_t hh, minute_t mm, second_t ss) noexcept + { + cd += hh / 24; + hh %= 24; + if (hh < 0) + { + cd -= 1; + hh += 24; + } + return n_mon(y, m, d, cd, static_cast(hh), mm, ss); + } + CONSTEXPR_F fields n_min(year_t y, diff_t m, diff_t d, diff_t hh, diff_t ch, diff_t mm, second_t ss) noexcept + { + ch += mm / 60; + mm %= 60; + if (mm < 0) + { + ch -= 1; + mm += 60; + } + return n_hour(y, m, d, hh / 24 + ch / 24, hh % 24 + ch % 24, static_cast(mm), ss); + } + CONSTEXPR_F fields n_sec(year_t y, diff_t m, diff_t d, diff_t hh, diff_t mm, diff_t ss) noexcept + { + // Optimization for when (non-constexpr) fields are already normalized. 
+ if (0 <= ss && ss < 60) + { + const second_t nss = static_cast(ss); + if (0 <= mm && mm < 60) + { + const minute_t nmm = static_cast(mm); + if (0 <= hh && hh < 24) + { + const hour_t nhh = static_cast(hh); + if (1 <= d && d <= 28 && 1 <= m && m <= 12) + { + const day_t nd = static_cast(d); + const month_t nm = static_cast(m); + return fields(y, nm, nd, nhh, nmm, nss); + } + return n_mon(y, m, d, 0, nhh, nmm, nss); + } + return n_hour(y, m, d, hh / 24, hh % 24, nmm, nss); + } + return n_min(y, m, d, hh, mm / 60, mm % 60, nss); + } + diff_t cm = ss / 60; + ss %= 60; + if (ss < 0) + { + cm -= 1; + ss += 60; + } + return n_min(y, m, d, hh, mm / 60 + cm / 60, mm % 60 + cm % 60, static_cast(ss)); + } + + } // namespace impl + + //////////////////////////////////////////////////////////////////////// + + // Increments the indicated (normalized) field by "n". + CONSTEXPR_F fields step(second_tag, fields f, diff_t n) noexcept + { + return impl::n_sec(f.y, f.m, f.d, f.hh, f.mm + n / 60, f.ss + n % 60); + } + CONSTEXPR_F fields step(minute_tag, fields f, diff_t n) noexcept + { + return impl::n_min(f.y, f.m, f.d, f.hh + n / 60, 0, f.mm + n % 60, f.ss); + } + CONSTEXPR_F fields step(hour_tag, fields f, diff_t n) noexcept + { + return impl::n_hour(f.y, f.m, f.d + n / 24, 0, f.hh + n % 24, f.mm, f.ss); + } + CONSTEXPR_F fields step(day_tag, fields f, diff_t n) noexcept + { + return impl::n_day(f.y, f.m, f.d, n, f.hh, f.mm, f.ss); + } + CONSTEXPR_F fields step(month_tag, fields f, diff_t n) noexcept + { + return impl::n_mon(f.y + n / 12, f.m + n % 12, f.d, 0, f.hh, f.mm, f.ss); + } + CONSTEXPR_F fields step(year_tag, fields f, diff_t n) noexcept + { + return fields(f.y + n, f.m, f.d, f.hh, f.mm, f.ss); + } + + //////////////////////////////////////////////////////////////////////// + + namespace impl + { + + // Returns (v * f + a) but avoiding intermediate overflow when possible. + CONSTEXPR_F diff_t scale_add(diff_t v, diff_t f, diff_t a) noexcept + { + return (v < 0) ? 
((v + 1) * f + a) - f : ((v - 1) * f + a) + f; + } + + // Map a (normalized) Y/M/D to the number of days before/after 1970-01-01. + // Probably overflows for years outside [-292277022656:292277026595]. + CONSTEXPR_F diff_t ymd_ord(year_t y, month_t m, day_t d) noexcept + { + const diff_t eyear = (m <= 2) ? y - 1 : y; + const diff_t era = (eyear >= 0 ? eyear : eyear - 399) / 400; + const diff_t yoe = eyear - era * 400; + const diff_t doy = (153 * (m + (m > 2 ? -3 : 9)) + 2) / 5 + d - 1; + const diff_t doe = yoe * 365 + yoe / 4 - yoe / 100 + doy; + return era * 146097 + doe - 719468; + } + + // Returns the difference in days between two normalized Y-M-D tuples. + // ymd_ord() will encounter integer overflow given extreme year values, + // yet the difference between two such extreme values may actually be + // small, so we take a little care to avoid overflow when possible by + // exploiting the 146097-day cycle. + CONSTEXPR_F diff_t day_difference(year_t y1, month_t m1, day_t d1, year_t y2, month_t m2, day_t d2) noexcept + { + const diff_t a_c4_off = y1 % 400; + const diff_t b_c4_off = y2 % 400; + diff_t c4_diff = (y1 - a_c4_off) - (y2 - b_c4_off); + diff_t delta = ymd_ord(a_c4_off, m1, d1) - ymd_ord(b_c4_off, m2, d2); + if (c4_diff > 0 && delta < 0) + { + delta += 2 * 146097; + c4_diff -= 2 * 400; + } + else if (c4_diff < 0 && delta > 0) + { + delta -= 2 * 146097; + c4_diff += 2 * 400; + } + return (c4_diff / 400 * 146097) + delta; + } + + } // namespace impl + + // Returns the difference between fields structs using the indicated unit. 
+ CONSTEXPR_F diff_t difference(year_tag, fields f1, fields f2) noexcept + { + return f1.y - f2.y; + } + CONSTEXPR_F diff_t difference(month_tag, fields f1, fields f2) noexcept + { + return impl::scale_add(difference(year_tag{}, f1, f2), 12, (f1.m - f2.m)); + } + CONSTEXPR_F diff_t difference(day_tag, fields f1, fields f2) noexcept + { + return impl::day_difference(f1.y, f1.m, f1.d, f2.y, f2.m, f2.d); + } + CONSTEXPR_F diff_t difference(hour_tag, fields f1, fields f2) noexcept + { + return impl::scale_add(difference(day_tag{}, f1, f2), 24, (f1.hh - f2.hh)); + } + CONSTEXPR_F diff_t difference(minute_tag, fields f1, fields f2) noexcept + { + return impl::scale_add(difference(hour_tag{}, f1, f2), 60, (f1.mm - f2.mm)); + } + CONSTEXPR_F diff_t difference(second_tag, fields f1, fields f2) noexcept + { + return impl::scale_add(difference(minute_tag{}, f1, f2), 60, f1.ss - f2.ss); + } + + //////////////////////////////////////////////////////////////////////// + + // Aligns the (normalized) fields struct to the indicated field. 
+ CONSTEXPR_F fields align(second_tag, fields f) noexcept + { + return f; + } + CONSTEXPR_F fields align(minute_tag, fields f) noexcept + { + return fields{f.y, f.m, f.d, f.hh, f.mm, 0}; + } + CONSTEXPR_F fields align(hour_tag, fields f) noexcept + { + return fields{f.y, f.m, f.d, f.hh, 0, 0}; + } + CONSTEXPR_F fields align(day_tag, fields f) noexcept + { + return fields{f.y, f.m, f.d, 0, 0, 0}; + } + CONSTEXPR_F fields align(month_tag, fields f) noexcept + { + return fields{f.y, f.m, 1, 0, 0, 0}; + } + CONSTEXPR_F fields align(year_tag, fields f) noexcept + { + return fields{f.y, 1, 1, 0, 0, 0}; + } + + //////////////////////////////////////////////////////////////////////// + + namespace impl + { + + template + H AbslHashValueImpl(second_tag, H h, fields f) + { + return H::combine(std::move(h), f.y, f.m, f.d, f.hh, f.mm, f.ss); + } + template + H AbslHashValueImpl(minute_tag, H h, fields f) + { + return H::combine(std::move(h), f.y, f.m, f.d, f.hh, f.mm); + } + template + H AbslHashValueImpl(hour_tag, H h, fields f) + { + return H::combine(std::move(h), f.y, f.m, f.d, f.hh); + } + template + H AbslHashValueImpl(day_tag, H h, fields f) + { + return H::combine(std::move(h), f.y, f.m, f.d); + } + template + H AbslHashValueImpl(month_tag, H h, fields f) + { + return H::combine(std::move(h), f.y, f.m); + } + template + H AbslHashValueImpl(year_tag, H h, fields f) + { + return H::combine(std::move(h), f.y); + } + + } // namespace impl + + //////////////////////////////////////////////////////////////////////// + + template + class civil_time + { + public: + explicit CONSTEXPR_M civil_time(year_t y, diff_t m = 1, diff_t d = 1, diff_t hh = 0, diff_t mm = 0, diff_t ss = 0) noexcept + : + civil_time(impl::n_sec(y, m, d, hh, mm, ss)) + { + } + + CONSTEXPR_M civil_time() noexcept : + f_{1970, 1, 1, 0, 0, 0} + { + } + civil_time(const civil_time&) = default; + civil_time& operator=(const civil_time&) = default; + + // Conversion between civil times of different alignment. 
Conversion to + // a more precise alignment is allowed implicitly (e.g., day -> hour), + // but conversion where information is discarded must be explicit + // (e.g., second -> minute). + template + using preserves_data = + typename std::enable_if::value>::type; + template + CONSTEXPR_M civil_time(const civil_time& ct, preserves_data* = nullptr) noexcept + : + civil_time(ct.f_) + { + } + template + explicit CONSTEXPR_M civil_time(const civil_time& ct, preserves_data* = nullptr) noexcept + : + civil_time(ct.f_) + { + } + + // Factories for the maximum/minimum representable civil_time. + static CONSTEXPR_F civil_time(max)() + { + const auto max_year = (std::numeric_limits::max)(); + return civil_time(max_year, 12, 31, 23, 59, 59); + } + static CONSTEXPR_F civil_time(min)() + { + const auto min_year = (std::numeric_limits::min)(); + return civil_time(min_year, 1, 1, 0, 0, 0); + } + + // Field accessors. Note: All but year() return an int. + CONSTEXPR_M year_t year() const noexcept + { + return f_.y; + } + CONSTEXPR_M int month() const noexcept + { + return f_.m; + } + CONSTEXPR_M int day() const noexcept + { + return f_.d; + } + CONSTEXPR_M int hour() const noexcept + { + return f_.hh; + } + CONSTEXPR_M int minute() const noexcept + { + return f_.mm; + } + CONSTEXPR_M int second() const noexcept + { + return f_.ss; + } + + // Assigning arithmetic. + CONSTEXPR_M civil_time& operator+=(diff_t n) noexcept + { + return *this = *this + n; + } + CONSTEXPR_M civil_time& operator-=(diff_t n) noexcept + { + return *this = *this - n; + } + CONSTEXPR_M civil_time& operator++() noexcept + { + return *this += 1; + } + CONSTEXPR_M civil_time operator++(int) noexcept + { + const civil_time a = *this; + ++*this; + return a; + } + CONSTEXPR_M civil_time& operator--() noexcept + { + return *this -= 1; + } + CONSTEXPR_M civil_time operator--(int) noexcept + { + const civil_time a = *this; + --*this; + return a; + } + + // Binary arithmetic operators. 
+ friend CONSTEXPR_F civil_time operator+(civil_time a, diff_t n) noexcept + { + return civil_time(step(T{}, a.f_, n)); + } + friend CONSTEXPR_F civil_time operator+(diff_t n, civil_time a) noexcept + { + return a + n; + } + friend CONSTEXPR_F civil_time operator-(civil_time a, diff_t n) noexcept + { + return n != (std::numeric_limits::min)() ? civil_time(step(T{}, a.f_, -n)) : civil_time(step(T{}, step(T{}, a.f_, -(n + 1)), 1)); + } + friend CONSTEXPR_F diff_t operator-(civil_time lhs, civil_time rhs) noexcept + { + return difference(T{}, lhs.f_, rhs.f_); + } + + template + friend H AbslHashValue(H h, civil_time a) + { + return impl::AbslHashValueImpl(T{}, std::move(h), a.f_); + } + + private: + // All instantiations of this template are allowed to call the following + // private constructor and access the private fields member. + template + friend class civil_time; + + // The designated constructor that all others eventually call. + explicit CONSTEXPR_M civil_time(fields f) noexcept : + f_(align(T{}, f)) + { + } + + fields f_; + }; + + // Disallows difference between differently aligned types. + // auto n = civil_day(...) - civil_hour(...); // would be confusing. + template + CONSTEXPR_F diff_t operator-(civil_time, civil_time) = delete; + + using civil_year = civil_time; + using civil_month = civil_time; + using civil_day = civil_time; + using civil_hour = civil_time; + using civil_minute = civil_time; + using civil_second = civil_time; + + //////////////////////////////////////////////////////////////////////// + + // Relational operators that work with differently aligned objects. + // Always compares all six fields. 
+ template + CONSTEXPR_F bool operator<(const civil_time& lhs, const civil_time& rhs) noexcept + { + return ( + lhs.year() < rhs.year() || + (lhs.year() == rhs.year() && + (lhs.month() < rhs.month() || + (lhs.month() == rhs.month() && + (lhs.day() < rhs.day() || (lhs.day() == rhs.day() && + (lhs.hour() < rhs.hour() || + (lhs.hour() == rhs.hour() && + (lhs.minute() < rhs.minute() || + (lhs.minute() == rhs.minute() && + (lhs.second() < rhs.second())))))))))) + ); + } + template + CONSTEXPR_F bool operator<=(const civil_time& lhs, const civil_time& rhs) noexcept + { + return !(rhs < lhs); + } + template + CONSTEXPR_F bool operator>=(const civil_time& lhs, const civil_time& rhs) noexcept + { + return !(lhs < rhs); + } + template + CONSTEXPR_F bool operator>(const civil_time& lhs, const civil_time& rhs) noexcept + { + return rhs < lhs; + } + template + CONSTEXPR_F bool operator==(const civil_time& lhs, const civil_time& rhs) noexcept + { + return lhs.year() == rhs.year() && lhs.month() == rhs.month() && + lhs.day() == rhs.day() && lhs.hour() == rhs.hour() && + lhs.minute() == rhs.minute() && lhs.second() == rhs.second(); + } + template + CONSTEXPR_F bool operator!=(const civil_time& lhs, const civil_time& rhs) noexcept + { + return !(lhs == rhs); + } + + //////////////////////////////////////////////////////////////////////// + + enum class weekday + { + monday, + tuesday, + wednesday, + thursday, + friday, + saturday, + sunday, + }; + + CONSTEXPR_F weekday get_weekday(const civil_second& cs) noexcept + { + CONSTEXPR_D weekday k_weekday_by_mon_off[13] = { + weekday::monday, + weekday::tuesday, + weekday::wednesday, + weekday::thursday, + weekday::friday, + weekday::saturday, + weekday::sunday, + weekday::monday, + weekday::tuesday, + weekday::wednesday, + weekday::thursday, + weekday::friday, + weekday::saturday, + }; + CONSTEXPR_D int k_weekday_offsets[1 + 12] = { + -1, + 0, + 3, + 2, + 5, + 0, + 3, + 5, + 1, + 4, + 6, + 2, + 4, + }; + year_t wd = 2400 + (cs.year() % 
400) - (cs.month() < 3); + wd += wd / 4 - wd / 100 + wd / 400; + wd += k_weekday_offsets[cs.month()] + cs.day(); + return k_weekday_by_mon_off[wd % 7 + 6]; + } + + //////////////////////////////////////////////////////////////////////// + + CONSTEXPR_F civil_day next_weekday(civil_day cd, weekday wd) noexcept + { + CONSTEXPR_D weekday k_weekdays_forw[14] = { + weekday::monday, + weekday::tuesday, + weekday::wednesday, + weekday::thursday, + weekday::friday, + weekday::saturday, + weekday::sunday, + weekday::monday, + weekday::tuesday, + weekday::wednesday, + weekday::thursday, + weekday::friday, + weekday::saturday, + weekday::sunday, + }; + weekday base = get_weekday(cd); + for (int i = 0;; ++i) + { + if (base == k_weekdays_forw[i]) + { + for (int j = i + 1;; ++j) + { + if (wd == k_weekdays_forw[j]) + { + return cd + (j - i); + } + } + } + } + } + + CONSTEXPR_F civil_day prev_weekday(civil_day cd, weekday wd) noexcept + { + CONSTEXPR_D weekday k_weekdays_back[14] = { + weekday::sunday, + weekday::saturday, + weekday::friday, + weekday::thursday, + weekday::wednesday, + weekday::tuesday, + weekday::monday, + weekday::sunday, + weekday::saturday, + weekday::friday, + weekday::thursday, + weekday::wednesday, + weekday::tuesday, + weekday::monday, + }; + weekday base = get_weekday(cd); + for (int i = 0;; ++i) + { + if (base == k_weekdays_back[i]) + { + for (int j = i + 1;; ++j) + { + if (wd == k_weekdays_back[j]) + { + return cd - (j - i); + } + } + } + } + } + + CONSTEXPR_F int get_yearday(const civil_second& cs) noexcept + { + CONSTEXPR_D int k_month_offsets[1 + 12] = { + -1, + 0, + 31, + 59, + 90, + 120, + 151, + 181, + 212, + 243, + 273, + 304, + 334, + }; + const int feb29 = (cs.month() > 2 && impl::is_leap_year(cs.year())); + return k_month_offsets[cs.month()] + feb29 + cs.day(); + } + + //////////////////////////////////////////////////////////////////////// + + std::ostream& operator<<(std::ostream& os, const civil_year& y); + std::ostream& 
operator<<(std::ostream& os, const civil_month& m); + std::ostream& operator<<(std::ostream& os, const civil_day& d); + std::ostream& operator<<(std::ostream& os, const civil_hour& h); + std::ostream& operator<<(std::ostream& os, const civil_minute& m); + std::ostream& operator<<(std::ostream& os, const civil_second& s); + std::ostream& operator<<(std::ostream& os, weekday wd); + + } // namespace detail + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#undef CONSTEXPR_M +#undef CONSTEXPR_F +#undef CONSTEXPR_D + +#endif // ABSL_TIME_INTERNAL_CCTZ_CIVIL_TIME_DETAIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/time_zone.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/time_zone.h new file mode 100644 index 00000000..0ababf99 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/time_zone.h @@ -0,0 +1,497 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A library for translating between absolute times (represented by +// std::chrono::time_points of the std::chrono::system_clock) and civil +// times (represented by cctz::civil_second) using the rules defined by +// a time zone (cctz::time_zone). 
+ +#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_ + +#include +#include +#include +#include // NOLINT: We use std::ratio in this header +#include +#include + +#include "absl/base/config.h" +#include "absl/time/internal/cctz/include/cctz/civil_time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // Convenience aliases. Not intended as public API points. + template + using time_point = std::chrono::time_point; + using seconds = std::chrono::duration; + using sys_seconds = seconds; // Deprecated. Use cctz::seconds instead. + + namespace detail + { + template + std::pair, D> split_seconds(const time_point& tp); + std::pair, seconds> split_seconds( + const time_point& tp + ); + } // namespace detail + + // cctz::time_zone is an opaque, small, value-type class representing a + // geo-political region within which particular rules are used for mapping + // between absolute and civil times. Time zones are named using the TZ + // identifiers from the IANA Time Zone Database, such as "America/Los_Angeles" + // or "Australia/Sydney". Time zones are created from factory functions such + // as load_time_zone(). Note: strings like "PST" and "EDT" are not valid TZ + // identifiers. + // + // Example: + // cctz::time_zone utc = cctz::utc_time_zone(); + // cctz::time_zone pst = cctz::fixed_time_zone(std::chrono::hours(-8)); + // cctz::time_zone loc = cctz::local_time_zone(); + // cctz::time_zone lax; + // if (!cctz::load_time_zone("America/Los_Angeles", &lax)) { ... 
} + // + // See also: + // - http://www.iana.org/time-zones + // - https://en.wikipedia.org/wiki/Zoneinfo + class time_zone + { + public: + time_zone() : + time_zone(nullptr) + { + } // Equivalent to UTC + time_zone(const time_zone&) = default; + time_zone& operator=(const time_zone&) = default; + + std::string name() const; + + // An absolute_lookup represents the civil time (cctz::civil_second) within + // this time_zone at the given absolute time (time_point). There are + // additionally a few other fields that may be useful when working with + // older APIs, such as std::tm. + // + // Example: + // const cctz::time_zone tz = ... + // const auto tp = std::chrono::system_clock::now(); + // const cctz::time_zone::absolute_lookup al = tz.lookup(tp); + struct absolute_lookup + { + civil_second cs; + // Note: The following fields exist for backward compatibility with older + // APIs. Accessing these fields directly is a sign of imprudent logic in + // the calling code. Modern time-related code should only access this data + // indirectly by way of cctz::format(). + int offset; // civil seconds east of UTC + bool is_dst; // is offset non-standard? + const char* abbr; // time-zone abbreviation (e.g., "PST") + }; + absolute_lookup lookup(const time_point& tp) const; + template + absolute_lookup lookup(const time_point& tp) const + { + return lookup(detail::split_seconds(tp).first); + } + + // A civil_lookup represents the absolute time(s) (time_point) that + // correspond to the given civil time (cctz::civil_second) within this + // time_zone. Usually the given civil time represents a unique instant + // in time, in which case the conversion is unambiguous. However, + // within this time zone, the given civil time may be skipped (e.g., + // during a positive UTC offset shift), or repeated (e.g., during a + // negative UTC offset shift). To account for these possibilities, + // civil_lookup is richer than just a single time_point. 
+ // + // In all cases the civil_lookup::kind enum will indicate the nature + // of the given civil-time argument, and the pre, trans, and post + // members will give the absolute time answers using the pre-transition + // offset, the transition point itself, and the post-transition offset, + // respectively (all three times are equal if kind == UNIQUE). If any + // of these three absolute times is outside the representable range of a + // time_point the field is set to its maximum/minimum value. + // + // Example: + // cctz::time_zone lax; + // if (!cctz::load_time_zone("America/Los_Angeles", &lax)) { ... } + // + // // A unique civil time. + // auto jan01 = lax.lookup(cctz::civil_second(2011, 1, 1, 0, 0, 0)); + // // jan01.kind == cctz::time_zone::civil_lookup::UNIQUE + // // jan01.pre is 2011/01/01 00:00:00 -0800 + // // jan01.trans is 2011/01/01 00:00:00 -0800 + // // jan01.post is 2011/01/01 00:00:00 -0800 + // + // // A Spring DST transition, when there is a gap in civil time. + // auto mar13 = lax.lookup(cctz::civil_second(2011, 3, 13, 2, 15, 0)); + // // mar13.kind == cctz::time_zone::civil_lookup::SKIPPED + // // mar13.pre is 2011/03/13 03:15:00 -0700 + // // mar13.trans is 2011/03/13 03:00:00 -0700 + // // mar13.post is 2011/03/13 01:15:00 -0800 + // + // // A Fall DST transition, when civil times are repeated. 
+ // auto nov06 = lax.lookup(cctz::civil_second(2011, 11, 6, 1, 15, 0)); + // // nov06.kind == cctz::time_zone::civil_lookup::REPEATED + // // nov06.pre is 2011/11/06 01:15:00 -0700 + // // nov06.trans is 2011/11/06 01:00:00 -0800 + // // nov06.post is 2011/11/06 01:15:00 -0800 + struct civil_lookup + { + enum civil_kind + { + UNIQUE, // the civil time was singular (pre == trans == post) + SKIPPED, // the civil time did not exist (pre >= trans > post) + REPEATED, // the civil time was ambiguous (pre < trans <= post) + } kind; + time_point pre; // uses the pre-transition offset + time_point trans; // instant of civil-offset change + time_point post; // uses the post-transition offset + }; + civil_lookup lookup(const civil_second& cs) const; + + // Finds the time of the next/previous offset change in this time zone. + // + // By definition, next_transition(tp, &trans) returns false when tp has + // its maximum value, and prev_transition(tp, &trans) returns false + // when tp has its minimum value. If the zone has no transitions, the + // result will also be false no matter what the argument. + // + // Otherwise, when tp has its minimum value, next_transition(tp, &trans) + // returns true and sets trans to the first recorded transition. Chains + // of calls to next_transition()/prev_transition() will eventually return + // false, but it is unspecified exactly when next_transition(tp, &trans) + // jumps to false, or what time is set by prev_transition(tp, &trans) for + // a very distant tp. + // + // Note: Enumeration of time-zone transitions is for informational purposes + // only. Modern time-related code should not care about when offset changes + // occur. + // + // Example: + // cctz::time_zone nyc; + // if (!cctz::load_time_zone("America/New_York", &nyc)) { ... 
} + // const auto now = std::chrono::system_clock::now(); + // auto tp = cctz::time_point::min(); + // cctz::time_zone::civil_transition trans; + // while (tp <= now && nyc.next_transition(tp, &trans)) { + // // transition: trans.from -> trans.to + // tp = nyc.lookup(trans.to).trans; + // } + struct civil_transition + { + civil_second from; // the civil time we jump from + civil_second to; // the civil time we jump to + }; + bool next_transition(const time_point& tp, civil_transition* trans) const; + template + bool next_transition(const time_point& tp, civil_transition* trans) const + { + return next_transition(detail::split_seconds(tp).first, trans); + } + bool prev_transition(const time_point& tp, civil_transition* trans) const; + template + bool prev_transition(const time_point& tp, civil_transition* trans) const + { + return prev_transition(detail::split_seconds(tp).first, trans); + } + + // version() and description() provide additional information about the + // time zone. The content of each of the returned strings is unspecified, + // however, when the IANA Time Zone Database is the underlying data source + // the version() string will be in the familiar form (e.g., "2018e") or + // empty when unavailable. + // + // Note: These functions are for informational or testing purposes only. + std::string version() const; // empty when unknown + std::string description() const; + + // Relational operators. + friend bool operator==(time_zone lhs, time_zone rhs) + { + return &lhs.effective_impl() == &rhs.effective_impl(); + } + friend bool operator!=(time_zone lhs, time_zone rhs) + { + return !(lhs == rhs); + } + + template + friend H AbslHashValue(H h, time_zone tz) + { + return H::combine(std::move(h), &tz.effective_impl()); + } + + class Impl; + + private: + explicit time_zone(const Impl* impl) : + impl_(impl) + { + } + const Impl& effective_impl() const; // handles implicit UTC + const Impl* impl_; + }; + + // Loads the named time zone. 
May perform I/O on the initial load. + // If the name is invalid, or some other kind of error occurs, returns + // false and "*tz" is set to the UTC time zone. + bool load_time_zone(const std::string& name, time_zone* tz); + + // Returns a time_zone representing UTC. Cannot fail. + time_zone utc_time_zone(); + + // Returns a time zone that is a fixed offset (seconds east) from UTC. + // Note: If the absolute value of the offset is greater than 24 hours + // you'll get UTC (i.e., zero offset) instead. + time_zone fixed_time_zone(const seconds& offset); + + // Returns a time zone representing the local time zone. Falls back to UTC. + // Note: local_time_zone.name() may only be something like "localtime". + time_zone local_time_zone(); + + // Returns the civil time (cctz::civil_second) within the given time zone at + // the given absolute time (time_point). Since the additional fields provided + // by the time_zone::absolute_lookup struct should rarely be needed in modern + // code, this convert() function is simpler and should be preferred. + template + inline civil_second convert(const time_point& tp, const time_zone& tz) + { + return tz.lookup(tp).cs; + } + + // Returns the absolute time (time_point) that corresponds to the given civil + // time within the given time zone. If the civil time is not unique (i.e., if + // it was either repeated or non-existent), then the returned time_point is + // the best estimate that preserves relative order. That is, this function + // guarantees that if cs1 < cs2, then convert(cs1, tz) <= convert(cs2, tz). 
+ inline time_point convert(const civil_second& cs, const time_zone& tz) + { + const time_zone::civil_lookup cl = tz.lookup(cs); + if (cl.kind == time_zone::civil_lookup::SKIPPED) + return cl.trans; + return cl.pre; + } + + namespace detail + { + using femtoseconds = std::chrono::duration; + std::string format(const std::string&, const time_point&, const femtoseconds&, const time_zone&); + bool parse(const std::string&, const std::string&, const time_zone&, time_point*, femtoseconds*, std::string* err = nullptr); + template + bool join_seconds( + const time_point& sec, const femtoseconds& fs, time_point>>* tpp + ); + template + bool join_seconds( + const time_point& sec, const femtoseconds& fs, time_point>>* tpp + ); + template + bool join_seconds( + const time_point& sec, const femtoseconds& fs, time_point>>* tpp + ); + bool join_seconds(const time_point& sec, const femtoseconds&, time_point* tpp); + } // namespace detail + + // Formats the given time_point in the given cctz::time_zone according to + // the provided format string. Uses strftime()-like formatting options, + // with the following extensions: + // + // - %Ez - RFC3339-compatible numeric UTC offset (+hh:mm or -hh:mm) + // - %E*z - Full-resolution numeric UTC offset (+hh:mm:ss or -hh:mm:ss) + // - %E#S - Seconds with # digits of fractional precision + // - %E*S - Seconds with full fractional precision (a literal '*') + // - %E#f - Fractional seconds with # digits of precision + // - %E*f - Fractional seconds with full precision (a literal '*') + // - %E4Y - Four-character years (-999 ... -001, 0000, 0001 ... 9999) + // - %ET - The RFC3339 "date-time" separator "T" + // + // Note that %E0S behaves like %S, and %E0f produces no characters. In + // contrast %E*f always produces at least one digit, which may be '0'. + // + // Note that %Y produces as many characters as it takes to fully render the + // year. 
A year outside of [-999:9999] when formatted with %E4Y will produce + // more than four characters, just like %Y. + // + // Tip: Format strings should include the UTC offset (e.g., %z, %Ez, or %E*z) + // so that the resulting string uniquely identifies an absolute time. + // + // Example: + // cctz::time_zone lax; + // if (!cctz::load_time_zone("America/Los_Angeles", &lax)) { ... } + // auto tp = cctz::convert(cctz::civil_second(2013, 1, 2, 3, 4, 5), lax); + // std::string f = cctz::format("%H:%M:%S", tp, lax); // "03:04:05" + // f = cctz::format("%H:%M:%E3S", tp, lax); // "03:04:05.000" + template + inline std::string format(const std::string& fmt, const time_point& tp, const time_zone& tz) + { + const auto p = detail::split_seconds(tp); + const auto n = std::chrono::duration_cast(p.second); + return detail::format(fmt, p.first, n, tz); + } + + // Parses an input string according to the provided format string and + // returns the corresponding time_point. Uses strftime()-like formatting + // options, with the same extensions as cctz::format(), but with the + // exceptions that %E#S is interpreted as %E*S, and %E#f as %E*f. %Ez + // and %E*z also accept the same inputs, which (along with %z) includes + // 'z' and 'Z' as synonyms for +00:00. %ET accepts either 'T' or 't'. + // + // %Y consumes as many numeric characters as it can, so the matching data + // should always be terminated with a non-numeric. %E4Y always consumes + // exactly four characters, including any sign. + // + // Unspecified fields are taken from the default date and time of ... + // + // "1970-01-01 00:00:00.0 +0000" + // + // For example, parsing a string of "15:45" (%H:%M) will return a time_point + // that represents "1970-01-01 15:45:00.0 +0000". + // + // Note that parse() returns time instants, so it makes most sense to parse + // fully-specified date/time strings that include a UTC offset (%z, %Ez, or + // %E*z). 
+ // + // Note also that parse() only heeds the fields year, month, day, hour, + // minute, (fractional) second, and UTC offset. Other fields, like weekday (%a + // or %A), while parsed for syntactic validity, are ignored in the conversion. + // + // Date and time fields that are out-of-range will be treated as errors rather + // than normalizing them like cctz::civil_second() would do. For example, it + // is an error to parse the date "Oct 32, 2013" because 32 is out of range. + // + // A second of ":60" is normalized to ":00" of the following minute with + // fractional seconds discarded. The following table shows how the given + // seconds and subseconds will be parsed: + // + // "59.x" -> 59.x // exact + // "60.x" -> 00.0 // normalized + // "00.x" -> 00.x // exact + // + // Errors are indicated by returning false. + // + // Example: + // const cctz::time_zone tz = ... + // std::chrono::system_clock::time_point tp; + // if (cctz::parse("%Y-%m-%d", "2015-10-09", tz, &tp)) { + // ... + // } + template + inline bool parse(const std::string& fmt, const std::string& input, const time_zone& tz, time_point* tpp) + { + time_point sec; + detail::femtoseconds fs; + return detail::parse(fmt, input, tz, &sec, &fs) && + detail::join_seconds(sec, fs, tpp); + } + + namespace detail + { + + // Split a time_point into a time_point and a D subseconds. + // Undefined behavior if time_point is not of sufficient range. + // Note that this means it is UB to call cctz::time_zone::lookup(tp) or + // cctz::format(fmt, tp, tz) with a time_point that is outside the range + // of a 64-bit std::time_t. 
+ template + std::pair, D> split_seconds(const time_point& tp) + { + auto sec = std::chrono::time_point_cast(tp); + auto sub = tp - sec; + if (sub.count() < 0) + { + sec -= seconds(1); + sub += seconds(1); + } + return {sec, std::chrono::duration_cast(sub)}; + } + + inline std::pair, seconds> split_seconds( + const time_point& tp + ) + { + return {tp, seconds::zero()}; + } + + // Join a time_point and femto subseconds into a time_point. + // Floors to the resolution of time_point. Returns false if time_point + // is not of sufficient range. + template + bool join_seconds( + const time_point& sec, const femtoseconds& fs, time_point>>* tpp + ) + { + using D = std::chrono::duration>; + // TODO(#199): Return false if result unrepresentable as a time_point. + *tpp = std::chrono::time_point_cast(sec); + *tpp += std::chrono::duration_cast(fs); + return true; + } + + template + bool join_seconds( + const time_point& sec, const femtoseconds&, time_point>>* tpp + ) + { + using D = std::chrono::duration>; + auto count = sec.time_since_epoch().count(); + if (count >= 0 || count % Num == 0) + { + count /= Num; + } + else + { + count /= Num; + count -= 1; + } + if (count > (std::numeric_limits::max)()) + return false; + if (count < (std::numeric_limits::min)()) + return false; + *tpp = time_point() + D{static_cast(count)}; + return true; + } + + template + bool join_seconds( + const time_point& sec, const femtoseconds&, time_point>>* tpp + ) + { + using D = std::chrono::duration>; + auto count = sec.time_since_epoch().count(); + if (count > (std::numeric_limits::max)()) + return false; + if (count < (std::numeric_limits::min)()) + return false; + *tpp = time_point() + D{static_cast(count)}; + return true; + } + + inline bool join_seconds(const time_point& sec, const femtoseconds&, time_point* tpp) + { + *tpp = sec; + return true; + } + + } // namespace detail + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // 
ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/zone_info_source.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/zone_info_source.h new file mode 100644 index 00000000..ab5cbc62 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/zone_info_source.h @@ -0,0 +1,110 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_CCTZ_ZONE_INFO_SOURCE_H_ +#define ABSL_TIME_INTERNAL_CCTZ_ZONE_INFO_SOURCE_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // A stdio-like interface for providing zoneinfo data for a particular zone. + class ZoneInfoSource + { + public: + virtual ~ZoneInfoSource(); + + virtual std::size_t Read(void* ptr, std::size_t size) = 0; // like fread() + virtual int Skip(std::size_t offset) = 0; // like fseek() + + // Until the zoneinfo data supports versioning information, we provide + // a way for a ZoneInfoSource to indicate it out-of-band. The default + // implementation returns an empty string. 
+ virtual std::string Version() const; + }; + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz_extension + { + + // A function-pointer type for a factory that returns a ZoneInfoSource + // given the name of a time zone and a fallback factory. Returns null + // when the data for the named zone cannot be found. + using ZoneInfoSourceFactory = + std::unique_ptr (*)( + const std::string&, + const std::function(const std::string&)>& + ); + + // The user can control the mapping of zone names to zoneinfo data by + // providing a definition for cctz_extension::zone_info_source_factory. + // For example, given functions my_factory() and my_other_factory() that + // can return a ZoneInfoSource for a named zone, we could inject them into + // cctz::load_time_zone() with: + // + // namespace cctz_extension { + // namespace { + // std::unique_ptr CustomFactory( + // const std::string& name, + // const std::function( + // const std::string& name)>& fallback_factory) { + // if (auto zip = my_factory(name)) return zip; + // if (auto zip = fallback_factory(name)) return zip; + // if (auto zip = my_other_factory(name)) return zip; + // return nullptr; + // } + // } // namespace + // ZoneInfoSourceFactory zone_info_source_factory = CustomFactory; + // } // namespace cctz_extension + // + // This might be used, say, to use zoneinfo data embedded in the program, + // or read from a (possibly compressed) file archive, or both. + // + // cctz_extension::zone_info_source_factory() will be called: + // (1) from the same thread as the cctz::load_time_zone() call, + // (2) only once for any zone name, and + // (3) serially (i.e., no concurrent execution). + // + // The fallback factory obtains zoneinfo data by reading files in ${TZDIR}, + // and it is used automatically when no zone_info_source_factory definition + // is linked into the program. 
+ extern ZoneInfoSourceFactory zone_info_source_factory; + + } // namespace cctz_extension + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_ZONE_INFO_SOURCE_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_fixed.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_fixed.h new file mode 100644 index 00000000..5aaa52ea --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_fixed.h @@ -0,0 +1,55 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_FIXED_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_FIXED_H_ + +#include + +#include "absl/base/config.h" +#include "absl/time/internal/cctz/include/cctz/time_zone.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // Helper functions for dealing with the names and abbreviations + // of time zones that are a fixed offset (seconds east) from UTC. + // FixedOffsetFromName() extracts the offset from a valid fixed-offset + // name, while FixedOffsetToName() and FixedOffsetToAbbr() generate + // the canonical zone name and abbreviation respectively for the given + // offset. + // + // A fixed-offset name looks like "Fixed/UTC<+->::". + // Its abbreviation is of the form "UTC(<+->H?H(MM(SS)?)?)?" 
where the + // optional pieces are omitted when their values are zero. (Note that + // the sign is the opposite of that used in a POSIX TZ specification.) + // + // Note: FixedOffsetFromName() fails on syntax errors or when the parsed + // offset exceeds 24 hours. FixedOffsetToName() and FixedOffsetToAbbr() + // both produce "UTC" when the argument offset exceeds 24 hours. + bool FixedOffsetFromName(const std::string& name, seconds* offset); + std::string FixedOffsetToName(const seconds& offset); + std::string FixedOffsetToAbbr(const seconds& offset); + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_FIXED_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_if.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_if.h new file mode 100644 index 00000000..6b0b58bc --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_if.h @@ -0,0 +1,87 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IF_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IF_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/time/internal/cctz/include/cctz/civil_time.h" +#include "absl/time/internal/cctz/include/cctz/time_zone.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // A simple interface used to hide time-zone complexities from time_zone::Impl. + // Subclasses implement the functions for civil-time conversions in the zone. + class TimeZoneIf + { + public: + // Factory functions for TimeZoneIf implementations. + static std::unique_ptr UTC(); // never fails + static std::unique_ptr Make(const std::string& name); + + virtual ~TimeZoneIf(); + + virtual time_zone::absolute_lookup BreakTime( + const time_point& tp + ) const = 0; + virtual time_zone::civil_lookup MakeTime(const civil_second& cs) const = 0; + + virtual bool NextTransition(const time_point& tp, time_zone::civil_transition* trans) const = 0; + virtual bool PrevTransition(const time_point& tp, time_zone::civil_transition* trans) const = 0; + + virtual std::string Version() const = 0; + virtual std::string Description() const = 0; + + protected: + TimeZoneIf() = default; + TimeZoneIf(const TimeZoneIf&) = delete; + TimeZoneIf& operator=(const TimeZoneIf&) = delete; + }; + + // Convert between time_point and a count of seconds since the + // Unix epoch. We assume that the std::chrono::system_clock and the + // Unix clock are second aligned, and that the results are representable. + // (That is, that they share an epoch, which is required since C++20.) 
+ inline std::int_fast64_t ToUnixSeconds(const time_point& tp) + { + return (tp - std::chrono::time_point_cast( + std::chrono::system_clock::from_time_t(0) + )) + .count(); + } + inline time_point FromUnixSeconds(std::int_fast64_t t) + { + return std::chrono::time_point_cast( + std::chrono::system_clock::from_time_t(0) + ) + + seconds(t); + } + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IF_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_impl.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_impl.h new file mode 100644 index 00000000..3e5c10e4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_impl.h @@ -0,0 +1,110 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IMPL_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IMPL_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/time/internal/cctz/include/cctz/civil_time.h" +#include "absl/time/internal/cctz/include/cctz/time_zone.h" +#include "time_zone_if.h" +#include "time_zone_info.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // time_zone::Impl is the internal object referenced by a cctz::time_zone. + class time_zone::Impl + { + public: + // The UTC time zone. 
Also used for other time zones that fail to load. + static time_zone UTC(); + + // Load a named time zone. Returns false if the name is invalid, or if + // some other kind of error occurs. Note that loading "UTC" never fails. + static bool LoadTimeZone(const std::string& name, time_zone* tz); + + // Clears the map of cached time zones. Primarily for use in benchmarks + // that gauge the performance of loading/parsing the time-zone data. + static void ClearTimeZoneMapTestOnly(); + + // The primary key is the time-zone ID (e.g., "America/New_York"). + const std::string& Name() const + { + // TODO: It would be nice if the zoneinfo data included the zone name. + return name_; + } + + // Breaks a time_point down to civil-time components in this time zone. + time_zone::absolute_lookup BreakTime(const time_point& tp) const + { + return zone_->BreakTime(tp); + } + + // Converts the civil-time components in this time zone into a time_point. + // That is, the opposite of BreakTime(). The requested civil time may be + // ambiguous or illegal due to a change of UTC offset. + time_zone::civil_lookup MakeTime(const civil_second& cs) const + { + return zone_->MakeTime(cs); + } + + // Finds the time of the next/previous offset change in this time zone. + bool NextTransition(const time_point& tp, time_zone::civil_transition* trans) const + { + return zone_->NextTransition(tp, trans); + } + bool PrevTransition(const time_point& tp, time_zone::civil_transition* trans) const + { + return zone_->PrevTransition(tp, trans); + } + + // Returns an implementation-defined version string for this time zone. + std::string Version() const + { + return zone_->Version(); + } + + // Returns an implementation-defined description of this time zone. 
+ std::string Description() const + { + return zone_->Description(); + } + + private: + Impl(); + explicit Impl(const std::string& name); + Impl(const Impl&) = delete; + Impl& operator=(const Impl&) = delete; + + static const Impl* UTCImpl(); + + const std::string name_; + std::unique_ptr zone_; + }; + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_IMPL_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_info.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_info.h new file mode 100644 index 00000000..03304c5d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_info.h @@ -0,0 +1,132 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_INFO_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_INFO_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/time/internal/cctz/include/cctz/civil_time.h" +#include "absl/time/internal/cctz/include/cctz/time_zone.h" +#include "absl/time/internal/cctz/include/cctz/zone_info_source.h" +#include "time_zone_if.h" +#include "tzfile.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // A transition to a new UTC offset. 
+ struct Transition + { + std::int_least64_t unix_time; // the instant of this transition + std::uint_least8_t type_index; // index of the transition type + civil_second civil_sec; // local civil time of transition + civil_second prev_civil_sec; // local civil time one second earlier + + struct ByUnixTime + { + inline bool operator()(const Transition& lhs, const Transition& rhs) const + { + return lhs.unix_time < rhs.unix_time; + } + }; + struct ByCivilTime + { + inline bool operator()(const Transition& lhs, const Transition& rhs) const + { + return lhs.civil_sec < rhs.civil_sec; + } + }; + }; + + // The characteristics of a particular transition. + struct TransitionType + { + std::int_least32_t utc_offset; // the new prevailing UTC offset + civil_second civil_max; // max convertible civil time for offset + civil_second civil_min; // min convertible civil time for offset + bool is_dst; // did we move into daylight-saving time + std::uint_least8_t abbr_index; // index of the new abbreviation + }; + + // A time zone backed by the IANA Time Zone Database (zoneinfo). + class TimeZoneInfo : public TimeZoneIf + { + public: + // Factories. + static std::unique_ptr UTC(); // never fails + static std::unique_ptr Make(const std::string& name); + + // TimeZoneIf implementations. 
+ time_zone::absolute_lookup BreakTime( + const time_point& tp + ) const override; + time_zone::civil_lookup MakeTime(const civil_second& cs) const override; + bool NextTransition(const time_point& tp, time_zone::civil_transition* trans) const override; + bool PrevTransition(const time_point& tp, time_zone::civil_transition* trans) const override; + std::string Version() const override; + std::string Description() const override; + + private: + TimeZoneInfo() = default; + TimeZoneInfo(const TimeZoneInfo&) = delete; + TimeZoneInfo& operator=(const TimeZoneInfo&) = delete; + + bool GetTransitionType(std::int_fast32_t utc_offset, bool is_dst, const std::string& abbr, std::uint_least8_t* index); + bool EquivTransitions(std::uint_fast8_t tt1_index, std::uint_fast8_t tt2_index) const; + bool ExtendTransitions(); + + bool ResetToBuiltinUTC(const seconds& offset); + bool Load(const std::string& name); + bool Load(ZoneInfoSource* zip); + + // Helpers for BreakTime() and MakeTime(). + time_zone::absolute_lookup LocalTime(std::int_fast64_t unix_time, const TransitionType& tt) const; + time_zone::absolute_lookup LocalTime(std::int_fast64_t unix_time, const Transition& tr) const; + time_zone::civil_lookup TimeLocal(const civil_second& cs, year_t c4_shift) const; + + std::vector transitions_; // ordered by unix_time and civil_sec + std::vector transition_types_; // distinct transition types + std::uint_fast8_t default_transition_type_; // for before first transition + std::string abbreviations_; // all the NUL-terminated abbreviations + + std::string version_; // the tzdata version if available + std::string future_spec_; // for after the last zic transition + bool extended_; // future_spec_ was used to generate transitions + year_t last_year_; // the final year of the generated transitions + + // We remember the transitions found during the last BreakTime() and + // MakeTime() calls. If the next request is for the same transition we + // will avoid re-searching. 
+ mutable std::atomic local_time_hint_ = {}; // BreakTime() hint + mutable std::atomic time_local_hint_ = {}; // MakeTime() hint + }; + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_INFO_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_libc.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_libc.h new file mode 100644 index 00000000..d28ddbfb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_libc.h @@ -0,0 +1,63 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_ + +#include +#include + +#include "absl/base/config.h" +#include "time_zone_if.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // A time zone backed by gmtime_r(3), localtime_r(3), and mktime(3), + // and which therefore only supports UTC and the local time zone. + class TimeZoneLibC : public TimeZoneIf + { + public: + // Factory. + static std::unique_ptr Make(const std::string& name); + + // TimeZoneIf implementations. 
+ time_zone::absolute_lookup BreakTime( + const time_point& tp + ) const override; + time_zone::civil_lookup MakeTime(const civil_second& cs) const override; + bool NextTransition(const time_point& tp, time_zone::civil_transition* trans) const override; + bool PrevTransition(const time_point& tp, time_zone::civil_transition* trans) const override; + std::string Version() const override; + std::string Description() const override; + + private: + explicit TimeZoneLibC(const std::string& name); + TimeZoneLibC(const TimeZoneLibC&) = delete; + TimeZoneLibC& operator=(const TimeZoneLibC&) = delete; + + const bool local_; // localtime or UTC + }; + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_posix.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_posix.h new file mode 100644 index 00000000..5b9e885f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_posix.h @@ -0,0 +1,148 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parsing of a POSIX zone spec as described in the TZ part of section 8.3 in +// http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html. 
+// +// The current POSIX spec for America/Los_Angeles is "PST8PDT,M3.2.0,M11.1.0", +// which would be broken down as ... +// +// PosixTimeZone { +// std_abbr = "PST" +// std_offset = -28800 +// dst_abbr = "PDT" +// dst_offset = -25200 +// dst_start = PosixTransition { +// date { +// m { +// month = 3 +// week = 2 +// weekday = 0 +// } +// } +// time { +// offset = 7200 +// } +// } +// dst_end = PosixTransition { +// date { +// m { +// month = 11 +// week = 1 +// weekday = 0 +// } +// } +// time { +// offset = 7200 +// } +// } +// } + +#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_POSIX_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_POSIX_H_ + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + namespace cctz + { + + // The date/time of the transition. The date is specified as either: + // (J) the Nth day of the year (1 <= N <= 365), excluding leap days, or + // (N) the Nth day of the year (0 <= N <= 365), including leap days, or + // (M) the Nth weekday of a month (e.g., the 2nd Sunday in March). + // The time, specified as a day offset, identifies the particular moment + // of the transition, and may be negative or >= 24h, and in which case + // it would take us to another day, and perhaps week, or even month. 
+ struct PosixTransition + { + enum DateFormat + { + J, + N, + M + }; + + struct Date + { + struct NonLeapDay + { + std::int_fast16_t day; // day of non-leap year [1:365] + }; + struct Day + { + std::int_fast16_t day; // day of year [0:365] + }; + struct MonthWeekWeekday + { + std::int_fast8_t month; // month of year [1:12] + std::int_fast8_t week; // week of month [1:5] (5==last) + std::int_fast8_t weekday; // 0==Sun, ..., 6=Sat + }; + + DateFormat fmt; + + union + { + NonLeapDay j; + Day n; + MonthWeekWeekday m; + }; + }; + + struct Time + { + std::int_fast32_t offset; // seconds before/after 00:00:00 + }; + + Date date; + Time time; + }; + + // The entirety of a POSIX-string specified time-zone rule. The standard + // abbreviation and offset are always given. If the time zone includes + // daylight saving, then the daylight abbreviation is non-empty and the + // remaining fields are also valid. Note that the start/end transitions + // are not ordered---in the southern hemisphere the transition to end + // daylight time occurs first in any particular year. + struct PosixTimeZone + { + std::string std_abbr; + std::int_fast32_t std_offset; + + std::string dst_abbr; + std::int_fast32_t dst_offset; + PosixTransition dst_start; + PosixTransition dst_end; + }; + + // Breaks down a POSIX time-zone specification into its constituent pieces, + // filling in any missing values (DST offset, or start/end transition times) + // with the standard-defined defaults. Returns false if the specification + // could not be parsed (although some fields of *res may have been altered). 
+ bool ParsePosixSpec(const std::string& spec, PosixTimeZone* res); + + } // namespace cctz + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_POSIX_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/tzfile.h b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/tzfile.h new file mode 100644 index 00000000..385b011b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/cctz/src/tzfile.h @@ -0,0 +1,127 @@ +/* Layout and location of TZif files. */ + +#ifndef TZFILE_H + +#define TZFILE_H + +/* +** This file is in the public domain, so clarified as of +** 1996-06-05 by Arthur David Olson. +*/ + +/* +** This header is for use ONLY with the time conversion code. +** There is no guarantee that it will remain unchanged, +** or that it will remain at all. +** Do NOT copy it to any system include directory. +** Thank you! +*/ + +/* +** Information about time zone files. +*/ + +#ifndef TZDIR +#define TZDIR "/usr/share/zoneinfo" /* Time zone object file directory */ +#endif /* !defined TZDIR */ + +#ifndef TZDEFAULT +#define TZDEFAULT "/etc/localtime" +#endif /* !defined TZDEFAULT */ + +#ifndef TZDEFRULES +#define TZDEFRULES "posixrules" +#endif /* !defined TZDEFRULES */ + +/* See Internet RFC 8536 for more details about the following format. */ + +/* +** Each file begins with. . . +*/ + +#define TZ_MAGIC "TZif" + +struct tzhead +{ + char tzh_magic[4]; /* TZ_MAGIC */ + char tzh_version[1]; /* '\0' or '2'-'4' as of 2021 */ + char tzh_reserved[15]; /* reserved; must be zero */ + char tzh_ttisutcnt[4]; /* coded number of trans. time flags */ + char tzh_ttisstdcnt[4]; /* coded number of trans. time flags */ + char tzh_leapcnt[4]; /* coded number of leap seconds */ + char tzh_timecnt[4]; /* coded number of transition times */ + char tzh_typecnt[4]; /* coded number of local time types */ + char tzh_charcnt[4]; /* coded number of abbr. chars */ +}; + +/* +** . . .followed by. . . 
+** +** tzh_timecnt (char [4])s coded transition times a la time(2) +** tzh_timecnt (unsigned char)s types of local time starting at above +** tzh_typecnt repetitions of +** one (char [4]) coded UT offset in seconds +** one (unsigned char) used to set tm_isdst +** one (unsigned char) that's an abbreviation list index +** tzh_charcnt (char)s '\0'-terminated zone abbreviations +** tzh_leapcnt repetitions of +** one (char [4]) coded leap second transition times +** one (char [4]) total correction after above +** tzh_ttisstdcnt (char)s indexed by type; if 1, transition +** time is standard time, if 0, +** transition time is local (wall clock) +** time; if absent, transition times are +** assumed to be local time +** tzh_ttisutcnt (char)s indexed by type; if 1, transition +** time is UT, if 0, transition time is +** local time; if absent, transition +** times are assumed to be local time. +** When this is 1, the corresponding +** std/wall indicator must also be 1. +*/ + +/* +** If tzh_version is '2' or greater, the above is followed by a second instance +** of tzhead and a second instance of the data in which each coded transition +** time uses 8 rather than 4 chars, +** then a POSIX-TZ-environment-variable-style string for use in handling +** instants after the last transition time stored in the file +** (with nothing between the newlines if there is no POSIX representation for +** such instants). +** +** If tz_version is '3' or greater, the above is extended as follows. +** First, the POSIX TZ string's hour offset may range from -167 +** through 167 as compared to the POSIX-required 0 through 24. +** Second, its DST start time may be January 1 at 00:00 and its stop +** time December 31 at 24:00 plus the difference between DST and +** standard time, indicating DST all year. +*/ + +/* +** In the current implementation, "tzset()" refuses to deal with files that +** exceed any of the limits below. 
+*/ + +#ifndef TZ_MAX_TIMES +/* This must be at least 242 for Europe/London with 'zic -b fat'. */ +#define TZ_MAX_TIMES 2000 +#endif /* !defined TZ_MAX_TIMES */ + +#ifndef TZ_MAX_TYPES +/* This must be at least 18 for Europe/Vilnius with 'zic -b fat'. */ +#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */ +#endif /* !defined TZ_MAX_TYPES */ + +#ifndef TZ_MAX_CHARS +/* This must be at least 40 for America/Anchorage. */ +#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */ + /* (limited by what unsigned chars can hold) */ +#endif /* !defined TZ_MAX_CHARS */ + +#ifndef TZ_MAX_LEAPS +/* This must be at least 27 for leap seconds from 1972 through mid-2023. + There's a plan to discontinue leap seconds by 2035. */ +#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */ +#endif /* !defined TZ_MAX_LEAPS */ + +#endif /* !defined TZFILE_H */ diff --git a/CAPI/cpp/grpc/include/absl/time/internal/get_current_time_chrono.inc b/CAPI/cpp/grpc/include/absl/time/internal/get_current_time_chrono.inc new file mode 100644 index 00000000..5eeb6406 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/get_current_time_chrono.inc @@ -0,0 +1,31 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace time_internal { + +static int64_t GetCurrentTimeNanosFromSystem() { + return std::chrono::duration_cast( + std::chrono::system_clock::now() - + std::chrono::system_clock::from_time_t(0)) + .count(); +} + +} // namespace time_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/CAPI/cpp/grpc/include/absl/time/internal/get_current_time_posix.inc b/CAPI/cpp/grpc/include/absl/time/internal/get_current_time_posix.inc new file mode 100644 index 00000000..42072000 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/get_current_time_posix.inc @@ -0,0 +1,24 @@ +#include "absl/time/clock.h" + +#include +#include +#include + +#include "absl/base/internal/raw_logging.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace time_internal { + +static int64_t GetCurrentTimeNanosFromSystem() { + const int64_t kNanosPerSecond = 1000 * 1000 * 1000; + struct timespec ts; + ABSL_RAW_CHECK(clock_gettime(CLOCK_REALTIME, &ts) == 0, + "Failed to read real-time clock."); + return (int64_t{ts.tv_sec} * kNanosPerSecond + + int64_t{ts.tv_nsec}); +} + +} // namespace time_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/CAPI/cpp/grpc/include/absl/time/internal/test_util.h b/CAPI/cpp/grpc/include/absl/time/internal/test_util.h new file mode 100644 index 00000000..69891af3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/internal/test_util.h @@ -0,0 +1,35 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_TEST_UTIL_H_ +#define ABSL_TIME_INTERNAL_TEST_UTIL_H_ + +#include + +#include "absl/time/time.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + + // Loads the named timezone, but dies on any failure. + absl::TimeZone LoadTimeZone(const std::string& name); + + } // namespace time_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_TEST_UTIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/time/time.h b/CAPI/cpp/grpc/include/absl/time/time.h new file mode 100644 index 00000000..e5f0747e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/time/time.h @@ -0,0 +1,1952 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: time.h +// ----------------------------------------------------------------------------- +// +// This header file defines abstractions for computing with absolute points +// in time, durations of time, and formatting and parsing time within a given +// time zone. 
The following abstractions are defined: +// +// * `absl::Time` defines an absolute, specific instance in time +// * `absl::Duration` defines a signed, fixed-length span of time +// * `absl::TimeZone` defines geopolitical time zone regions (as collected +// within the IANA Time Zone database (https://www.iana.org/time-zones)). +// +// Note: Absolute times are distinct from civil times, which refer to the +// human-scale time commonly represented by `YYYY-MM-DD hh:mm:ss`. The mapping +// between absolute and civil times can be specified by use of time zones +// (`absl::TimeZone` within this API). That is: +// +// Civil Time = F(Absolute Time, Time Zone) +// Absolute Time = G(Civil Time, Time Zone) +// +// See civil_time.h for abstractions related to constructing and manipulating +// civil time. +// +// Example: +// +// absl::TimeZone nyc; +// // LoadTimeZone() may fail so it's always better to check for success. +// if (!absl::LoadTimeZone("America/New_York", &nyc)) { +// // handle error case +// } +// +// // My flight leaves NYC on Jan 2, 2017 at 03:04:05 +// absl::CivilSecond cs(2017, 1, 2, 3, 4, 5); +// absl::Time takeoff = absl::FromCivil(cs, nyc); +// +// absl::Duration flight_duration = absl::Hours(21) + absl::Minutes(35); +// absl::Time landing = takeoff + flight_duration; +// +// absl::TimeZone syd; +// if (!absl::LoadTimeZone("Australia/Sydney", &syd)) { +// // handle error case +// } +// std::string s = absl::FormatTime( +// "My flight will land in Sydney on %Y-%m-%d at %H:%M:%S", +// landing, syd); + +#ifndef ABSL_TIME_TIME_H_ +#define ABSL_TIME_TIME_H_ + +#if !defined(_MSC_VER) +#include +#else +// We don't include `winsock2.h` because it drags in `windows.h` and friends, +// and they define conflicting macros like OPAQUE, ERROR, and more. This has the +// potential to break Abseil users. +// +// Instead we only forward declare `timeval` and require Windows users include +// `winsock2.h` themselves. 
This is both inconsistent and troublesome, but so is +// including 'windows.h' so we are picking the lesser of two evils here. +struct timeval; +#endif +#include // NOLINT(build/c++11) +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/strings/string_view.h" +#include "absl/time/civil_time.h" +#include "absl/time/internal/cctz/include/cctz/time_zone.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class Duration; // Defined below + class Time; // Defined below + class TimeZone; // Defined below + + namespace time_internal + { + int64_t IDivDuration(bool satq, Duration num, Duration den, Duration* rem); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixDuration(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ToUnixDuration(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr uint32_t GetRepLo(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi, uint32_t lo); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi, int64_t lo); + ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration MakePosDoubleDuration(double n); + constexpr int64_t kTicksPerNanosecond = 4; + constexpr int64_t kTicksPerSecond = 1000 * 1000 * 1000 * kTicksPerNanosecond; + template + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v, std::ratio<1, N>); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v, std::ratio<60>); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v, std::ratio<3600>); + template + using EnableIfIntegral = typename std::enable_if< + std::is_integral::value || std::is_enum::value, + int>::type; + template + using EnableIfFloat = + typename std::enable_if::value, int>::type; + } // namespace time_internal + + // Duration + // + // The `absl::Duration` class represents a signed, 
fixed-length amount of time. + // A `Duration` is generated using a unit-specific factory function, or is + // the result of subtracting one `absl::Time` from another. Durations behave + // like unit-safe integers and they support all the natural integer-like + // arithmetic operations. Arithmetic overflows and saturates at +/- infinity. + // `Duration` should be passed by value rather than const reference. + // + // Factory functions `Nanoseconds()`, `Microseconds()`, `Milliseconds()`, + // `Seconds()`, `Minutes()`, `Hours()` and `InfiniteDuration()` allow for + // creation of constexpr `Duration` values + // + // Examples: + // + // constexpr absl::Duration ten_ns = absl::Nanoseconds(10); + // constexpr absl::Duration min = absl::Minutes(1); + // constexpr absl::Duration hour = absl::Hours(1); + // absl::Duration dur = 60 * min; // dur == hour + // absl::Duration half_sec = absl::Milliseconds(500); + // absl::Duration quarter_sec = 0.25 * absl::Seconds(1); + // + // `Duration` values can be easily converted to an integral number of units + // using the division operator. + // + // Example: + // + // constexpr absl::Duration dur = absl::Milliseconds(1500); + // int64_t ns = dur / absl::Nanoseconds(1); // ns == 1500000000 + // int64_t ms = dur / absl::Milliseconds(1); // ms == 1500 + // int64_t sec = dur / absl::Seconds(1); // sec == 1 (subseconds truncated) + // int64_t min = dur / absl::Minutes(1); // min == 0 + // + // See the `IDivDuration()` and `FDivDuration()` functions below for details on + // how to access the fractional parts of the quotient. + // + // Alternatively, conversions can be performed using helpers such as + // `ToInt64Microseconds()` and `ToDoubleSeconds()`. + class Duration + { + public: + // Value semantics. + constexpr Duration() : + rep_hi_(0), + rep_lo_(0) + { + } // zero-length duration + + // Copyable. 
+#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1930 + // Explicitly defining the constexpr copy constructor avoids an MSVC bug. + constexpr Duration(const Duration& d) : + rep_hi_(d.rep_hi_), + rep_lo_(d.rep_lo_) + { + } +#else + constexpr Duration(const Duration& d) = default; +#endif + Duration& operator=(const Duration& d) = default; + + // Compound assignment operators. + Duration& operator+=(Duration d); + Duration& operator-=(Duration d); + Duration& operator*=(int64_t r); + Duration& operator*=(double r); + Duration& operator/=(int64_t r); + Duration& operator/=(double r); + Duration& operator%=(Duration rhs); + + // Overloads that forward to either the int64_t or double overloads above. + // Integer operands must be representable as int64_t. Integer division is + // truncating, so values less than the resolution will be returned as zero. + // Floating-point multiplication and division is rounding (halfway cases + // rounding away from zero), so values less than the resolution may be + // returned as either the resolution or zero. In particular, `d / 2.0` + // can produce `d` when it is the resolution and "even". 
+ template = 0> + Duration& operator*=(T r) + { + int64_t x = r; + return *this *= x; + } + + template = 0> + Duration& operator/=(T r) + { + int64_t x = r; + return *this /= x; + } + + template = 0> + Duration& operator*=(T r) + { + double x = r; + return *this *= x; + } + + template = 0> + Duration& operator/=(T r) + { + double x = r; + return *this /= x; + } + + template + friend H AbslHashValue(H h, Duration d) + { + return H::combine(std::move(h), d.rep_hi_.Get(), d.rep_lo_); + } + + private: + friend constexpr int64_t time_internal::GetRepHi(Duration d); + friend constexpr uint32_t time_internal::GetRepLo(Duration d); + friend constexpr Duration time_internal::MakeDuration(int64_t hi, uint32_t lo); + constexpr Duration(int64_t hi, uint32_t lo) : + rep_hi_(hi), + rep_lo_(lo) + { + } + + // We store `rep_hi_` 4-byte rather than 8-byte aligned to avoid 4 bytes of + // tail padding. + class HiRep + { + public: + // Default constructor default-initializes `hi_`, which has the same + // semantics as default-initializing an `int64_t` (undetermined value). + HiRep() = default; + + HiRep(const HiRep&) = default; + HiRep& operator=(const HiRep&) = default; + + explicit constexpr HiRep(const int64_t value) : + // C++17 forbids default-initialization in constexpr contexts. We can + // remove this in C++20. +#if defined(ABSL_IS_BIG_ENDIAN) && ABSL_IS_BIG_ENDIAN + hi_(0), + lo_(0) +#else + lo_(0), + hi_(0) +#endif + { + *this = value; + } + + constexpr int64_t Get() const + { + const uint64_t unsigned_value = + (static_cast(hi_) << 32) | static_cast(lo_); + // `static_cast(unsigned_value)` is implementation-defined + // before c++20. On all supported platforms the behaviour is that mandated + // by c++20, i.e. "If the destination type is signed, [...] the result is + // the unique value of the destination type equal to the source value + // modulo 2^n, where n is the number of bits used to represent the + // destination type." 
+ static_assert( + (static_cast((std::numeric_limits::max)()) == + int64_t{-1}) && + (static_cast(static_cast((std::numeric_limits::max)()) + 1) == + (std::numeric_limits::min)()), + "static_cast(uint64_t) does not have c++20 semantics" + ); + return static_cast(unsigned_value); + } + + constexpr HiRep& operator=(const int64_t value) + { + // "If the destination type is unsigned, the resulting value is the + // smallest unsigned value equal to the source value modulo 2^n + // where `n` is the number of bits used to represent the destination + // type". + const auto unsigned_value = static_cast(value); + hi_ = static_cast(unsigned_value >> 32); + lo_ = static_cast(unsigned_value); + return *this; + } + + private: + // Notes: + // - Ideally we would use a `char[]` and `std::bitcast`, but the latter + // does not exist (and is not constexpr in `absl`) before c++20. + // - Order is optimized depending on endianness so that the compiler can + // turn `Get()` (resp. `operator=()`) into a single 8-byte load (resp. + // store). 
+#if defined(ABSL_IS_BIG_ENDIAN) && ABSL_IS_BIG_ENDIAN + uint32_t hi_; + uint32_t lo_; +#else + uint32_t lo_; + uint32_t hi_; +#endif + }; + HiRep rep_hi_; + uint32_t rep_lo_; + }; + + // Relational Operators + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Duration lhs, Duration rhs); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>(Duration lhs, Duration rhs) + { + return rhs < lhs; + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>=(Duration lhs, Duration rhs) + { + return !(lhs < rhs); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<=(Duration lhs, Duration rhs) + { + return !(rhs < lhs); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Duration lhs, Duration rhs); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator!=(Duration lhs, Duration rhs) + { + return !(lhs == rhs); + } + + // Additive Operators + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration operator-(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator+(Duration lhs, Duration rhs) + { + return lhs += rhs; + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator-(Duration lhs, Duration rhs) + { + return lhs -= rhs; + } + + // Multiplicative Operators + // Integer operands must be representable as int64_t. 
+ template + ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(Duration lhs, T rhs) + { + return lhs *= rhs; + } + template + ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator*(T lhs, Duration rhs) + { + return rhs *= lhs; + } + template + ABSL_ATTRIBUTE_CONST_FUNCTION Duration operator/(Duration lhs, T rhs) + { + return lhs /= rhs; + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t operator/(Duration lhs, Duration rhs) + { + return time_internal::IDivDuration(true, lhs, rhs, + &lhs); // trunc towards zero + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator%(Duration lhs, Duration rhs) + { + return lhs %= rhs; + } + + // IDivDuration() + // + // Divides a numerator `Duration` by a denominator `Duration`, returning the + // quotient and remainder. The remainder always has the same sign as the + // numerator. The returned quotient and remainder respect the identity: + // + // numerator = denominator * quotient + remainder + // + // Returned quotients are capped to the range of `int64_t`, with the difference + // spilling into the remainder to uphold the above identity. This means that the + // remainder returned could differ from the remainder returned by + // `Duration::operator%` for huge quotients. + // + // See also the notes on `InfiniteDuration()` below regarding the behavior of + // division involving zero and infinite durations. + // + // Example: + // + // constexpr absl::Duration a = + // absl::Seconds(std::numeric_limits::max()); // big + // constexpr absl::Duration b = absl::Nanoseconds(1); // small + // + // absl::Duration rem = a % b; + // // rem == absl::ZeroDuration() + // + // // Here, q would overflow int64_t, so rem accounts for the difference. 
+ // int64_t q = absl::IDivDuration(a, b, &rem); + // // q == std::numeric_limits::max(), rem == a - b * q + inline int64_t IDivDuration(Duration num, Duration den, Duration* rem) + { + return time_internal::IDivDuration(true, num, den, + rem); // trunc towards zero + } + + // FDivDuration() + // + // Divides a `Duration` numerator into a fractional number of units of a + // `Duration` denominator. + // + // See also the notes on `InfiniteDuration()` below regarding the behavior of + // division involving zero and infinite durations. + // + // Example: + // + // double d = absl::FDivDuration(absl::Milliseconds(1500), absl::Seconds(1)); + // // d == 1.5 + ABSL_ATTRIBUTE_CONST_FUNCTION double FDivDuration(Duration num, Duration den); + + // ZeroDuration() + // + // Returns a zero-length duration. This function behaves just like the default + // constructor, but the name helps make the semantics clear at call sites. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ZeroDuration() + { + return Duration(); + } + + // AbsDuration() + // + // Returns the absolute value of a duration. + ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration AbsDuration(Duration d) + { + return (d < ZeroDuration()) ? -d : d; + } + + // Trunc() + // + // Truncates a duration (toward zero) to a multiple of a non-zero unit. + // + // Example: + // + // absl::Duration d = absl::Nanoseconds(123456789); + // absl::Duration a = absl::Trunc(d, absl::Microseconds(1)); // 123456us + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Trunc(Duration d, Duration unit); + + // Floor() + // + // Floors a duration using the passed duration unit to its largest value not + // greater than the duration. 
+ // + // Example: + // + // absl::Duration d = absl::Nanoseconds(123456789); + // absl::Duration b = absl::Floor(d, absl::Microseconds(1)); // 123456us + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Floor(Duration d, Duration unit); + + // Ceil() + // + // Returns the ceiling of a duration using the passed duration unit to its + // smallest value not less than the duration. + // + // Example: + // + // absl::Duration d = absl::Nanoseconds(123456789); + // absl::Duration c = absl::Ceil(d, absl::Microseconds(1)); // 123457us + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Ceil(Duration d, Duration unit); + + // InfiniteDuration() + // + // Returns an infinite `Duration`. To get a `Duration` representing negative + // infinity, use `-InfiniteDuration()`. + // + // Duration arithmetic overflows to +/- infinity and saturates. In general, + // arithmetic with `Duration` infinities is similar to IEEE 754 infinities + // except where IEEE 754 NaN would be involved, in which case +/- + // `InfiniteDuration()` is used in place of a "nan" Duration. + // + // Examples: + // + // constexpr absl::Duration inf = absl::InfiniteDuration(); + // const absl::Duration d = ... any finite duration ... + // + // inf == inf + inf + // inf == inf + d + // inf == inf - inf + // -inf == d - inf + // + // inf == d * 1e100 + // inf == inf / 2 + // 0 == d / inf + // INT64_MAX == inf / d + // + // d < inf + // -inf < d + // + // // Division by zero returns infinity, or INT64_MIN/MAX where appropriate. + // inf == d / 0 + // INT64_MAX == d / absl::ZeroDuration() + // + // The examples involving the `/` operator above also apply to `IDivDuration()` + // and `FDivDuration()`. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration InfiniteDuration(); + + // Nanoseconds() + // Microseconds() + // Milliseconds() + // Seconds() + // Minutes() + // Hours() + // + // Factory functions for constructing `Duration` values from an integral number + // of the unit indicated by the factory function's name. 
The number must be + // representable as int64_t. + // + // NOTE: no "Days()" factory function exists because "a day" is ambiguous. + // Civil days are not always 24 hours long, and a 24-hour duration often does + // not correspond with a civil day. If a 24-hour duration is needed, use + // `absl::Hours(24)`. If you actually want a civil day, use absl::CivilDay + // from civil_time.h. + // + // Example: + // + // absl::Duration a = absl::Seconds(60); + // absl::Duration b = absl::Minutes(1); // b == a + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Nanoseconds(T n) + { + return time_internal::FromInt64(n, std::nano{}); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Microseconds(T n) + { + return time_internal::FromInt64(n, std::micro{}); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Milliseconds(T n) + { + return time_internal::FromInt64(n, std::milli{}); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Seconds(T n) + { + return time_internal::FromInt64(n, std::ratio<1>{}); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Minutes(T n) + { + return time_internal::FromInt64(n, std::ratio<60>{}); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration Hours(T n) + { + return time_internal::FromInt64(n, std::ratio<3600>{}); + } + + // Factory overloads for constructing `Duration` values from a floating-point + // number of the unit indicated by the factory function's name. These functions + // exist for convenience, but they are not as efficient as the integral + // factories, which should be preferred. 
+ // + // Example: + // + // auto a = absl::Seconds(1.5); // OK + // auto b = absl::Milliseconds(1500); // BETTER + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Nanoseconds(T n) + { + return n * Nanoseconds(1); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Microseconds(T n) + { + return n * Microseconds(1); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Milliseconds(T n) + { + return n * Milliseconds(1); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Seconds(T n) + { + if (n >= 0) + { // Note: `NaN >= 0` is false. + if (n >= static_cast((std::numeric_limits::max)())) + { + return InfiniteDuration(); + } + return time_internal::MakePosDoubleDuration(n); + } + else + { + if (std::isnan(n)) + return std::signbit(n) ? -InfiniteDuration() : InfiniteDuration(); + if (n <= (std::numeric_limits::min)()) + return -InfiniteDuration(); + return -time_internal::MakePosDoubleDuration(-n); + } + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Minutes(T n) + { + return n * Minutes(1); + } + template = 0> + ABSL_ATTRIBUTE_CONST_FUNCTION Duration Hours(T n) + { + return n * Hours(1); + } + + // ToInt64Nanoseconds() + // ToInt64Microseconds() + // ToInt64Milliseconds() + // ToInt64Seconds() + // ToInt64Minutes() + // ToInt64Hours() + // + // Helper functions that convert a Duration to an integral count of the + // indicated unit. These return the same results as the `IDivDuration()` + // function, though they usually do so more efficiently; see the + // documentation of `IDivDuration()` for details about overflow, etc. 
+ // + // Example: + // + // absl::Duration d = absl::Milliseconds(1500); + // int64_t isec = absl::ToInt64Seconds(d); // isec == 1 + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Nanoseconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Microseconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Milliseconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Seconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Minutes(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Hours(Duration d); + + // ToDoubleNanoseconds() + // ToDoubleMicroseconds() + // ToDoubleMilliseconds() + // ToDoubleSeconds() + // ToDoubleMinutes() + // ToDoubleHours() + // + // Helper functions that convert a Duration to a floating point count of the + // indicated unit. These functions are shorthand for the `FDivDuration()` + // function above; see its documentation for details about overflow, etc. + // + // Example: + // + // absl::Duration d = absl::Milliseconds(1500); + // double dsec = absl::ToDoubleSeconds(d); // dsec == 1.5 + ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleNanoseconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleMicroseconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleMilliseconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleSeconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleMinutes(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION double ToDoubleHours(Duration d); + + // FromChrono() + // + // Converts any of the pre-defined std::chrono durations to an absl::Duration. 
+ // + // Example: + // + // std::chrono::milliseconds ms(123); + // absl::Duration d = absl::FromChrono(ms); + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::nanoseconds& d + ); + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::microseconds& d + ); + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::milliseconds& d + ); + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::seconds& d + ); + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::minutes& d + ); + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::hours& d + ); + + // ToChronoNanoseconds() + // ToChronoMicroseconds() + // ToChronoMilliseconds() + // ToChronoSeconds() + // ToChronoMinutes() + // ToChronoHours() + // + // Converts an absl::Duration to any of the pre-defined std::chrono durations. + // If overflow would occur, the returned value will saturate at the min/max + // chrono duration value instead. + // + // Example: + // + // absl::Duration d = absl::Microseconds(123); + // auto x = absl::ToChronoMicroseconds(d); + // auto y = absl::ToChronoNanoseconds(d); // x == y + // auto z = absl::ToChronoSeconds(absl::InfiniteDuration()); + // // z == std::chrono::seconds::max() + ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::nanoseconds ToChronoNanoseconds( + Duration d + ); + ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::microseconds ToChronoMicroseconds( + Duration d + ); + ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::milliseconds ToChronoMilliseconds( + Duration d + ); + ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::seconds ToChronoSeconds(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::minutes ToChronoMinutes(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::hours ToChronoHours(Duration d); + + // FormatDuration() + // + // Returns a string representing the duration in the form "72h3m0.5s". 
+ // Returns "inf" or "-inf" for +/- `InfiniteDuration()`. + ABSL_ATTRIBUTE_CONST_FUNCTION std::string FormatDuration(Duration d); + + // Output stream operator. + inline std::ostream& operator<<(std::ostream& os, Duration d) + { + return os << FormatDuration(d); + } + + // Support for StrFormat(), StrCat() etc. + template + void AbslStringify(Sink& sink, Duration d) + { + sink.Append(FormatDuration(d)); + } + + // ParseDuration() + // + // Parses a duration string consisting of a possibly signed sequence of + // decimal numbers, each with an optional fractional part and a unit + // suffix. The valid suffixes are "ns", "us" "ms", "s", "m", and "h". + // Simple examples include "300ms", "-1.5h", and "2h45m". Parses "0" as + // `ZeroDuration()`. Parses "inf" and "-inf" as +/- `InfiniteDuration()`. + bool ParseDuration(absl::string_view dur_string, Duration* d); + + // AbslParseFlag() + // + // Parses a command-line flag string representation `text` into a Duration + // value. Duration flags must be specified in a format that is valid input for + // `absl::ParseDuration()`. + bool AbslParseFlag(absl::string_view text, Duration* dst, std::string* error); + + // AbslUnparseFlag() + // + // Unparses a Duration value into a command-line string representation using + // the format specified by `absl::ParseDuration()`. + std::string AbslUnparseFlag(Duration d); + + ABSL_DEPRECATED("Use AbslParseFlag() instead.") + bool ParseFlag(const std::string& text, Duration* dst, std::string* error); + ABSL_DEPRECATED("Use AbslUnparseFlag() instead.") + std::string UnparseFlag(Duration d); + + // Time + // + // An `absl::Time` represents a specific instant in time. Arithmetic operators + // are provided for naturally expressing time calculations. Instances are + // created using `absl::Now()` and the `absl::From*()` factory functions that + // accept the gamut of other time representations. Formatting and parsing + // functions are provided for conversion to and from strings. 
`absl::Time` + // should be passed by value rather than const reference. + // + // `absl::Time` assumes there are 60 seconds in a minute, which means the + // underlying time scales must be "smeared" to eliminate leap seconds. + // See https://developers.google.com/time/smear. + // + // Even though `absl::Time` supports a wide range of timestamps, exercise + // caution when using values in the distant past. `absl::Time` uses the + // Proleptic Gregorian calendar, which extends the Gregorian calendar backward + // to dates before its introduction in 1582. + // See https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar + // for more information. Use the ICU calendar classes to convert a date in + // some other calendar (http://userguide.icu-project.org/datetime/calendar). + // + // Similarly, standardized time zones are a reasonably recent innovation, with + // the Greenwich prime meridian being established in 1884. The TZ database + // itself does not profess accurate offsets for timestamps prior to 1970. The + // breakdown of future timestamps is subject to the whim of regional + // governments. + // + // The `absl::Time` class represents an instant in time as a count of clock + // ticks of some granularity (resolution) from some starting point (epoch). + // + // `absl::Time` uses a resolution that is high enough to avoid loss in + // precision, and a range that is wide enough to avoid overflow, when + // converting between tick counts in most Google time scales (i.e., resolution + // of at least one nanosecond, and range +/-100 billion years). Conversions + // between the time scales are performed by truncating (towards negative + // infinity) to the nearest representable point. + // + // Examples: + // + // absl::Time t1 = ...; + // absl::Time t2 = t1 + absl::Minutes(2); + // absl::Duration d = t2 - t1; // == absl::Minutes(2) + // + class Time + { + public: + // Value semantics. + + // Returns the Unix epoch. 
However, those reading your code may not know + // or expect the Unix epoch as the default value, so make your code more + // readable by explicitly initializing all instances before use. + // + // Example: + // absl::Time t = absl::UnixEpoch(); + // absl::Time t = absl::Now(); + // absl::Time t = absl::TimeFromTimeval(tv); + // absl::Time t = absl::InfinitePast(); + constexpr Time() = default; + + // Copyable. + constexpr Time(const Time& t) = default; + Time& operator=(const Time& t) = default; + + // Assignment operators. + Time& operator+=(Duration d) + { + rep_ += d; + return *this; + } + Time& operator-=(Duration d) + { + rep_ -= d; + return *this; + } + + // Time::Breakdown + // + // The calendar and wall-clock (aka "civil time") components of an + // `absl::Time` in a certain `absl::TimeZone`. This struct is not + // intended to represent an instant in time. So, rather than passing + // a `Time::Breakdown` to a function, pass an `absl::Time` and an + // `absl::TimeZone`. + // + // Deprecated. Use `absl::TimeZone::CivilInfo`. + struct ABSL_DEPRECATED("Use `absl::TimeZone::CivilInfo`.") Breakdown + { + int64_t year; // year (e.g., 2013) + int month; // month of year [1:12] + int day; // day of month [1:31] + int hour; // hour of day [0:23] + int minute; // minute of hour [0:59] + int second; // second of minute [0:59] + Duration subsecond; // [Seconds(0):Seconds(1)) if finite + int weekday; // 1==Mon, ..., 7=Sun + int yearday; // day of year [1:366] + + // Note: The following fields exist for backward compatibility + // with older APIs. Accessing these fields directly is a sign of + // imprudent logic in the calling code. Modern time-related code + // should only access this data indirectly by way of FormatTime(). + // These fields are undefined for InfiniteFuture() and InfinitePast(). + int offset; // seconds east of UTC + bool is_dst; // is offset non-standard? 
+ const char* zone_abbr; // time-zone abbreviation (e.g., "PST") + }; + + // Time::In() + // + // Returns the breakdown of this instant in the given TimeZone. + // + // Deprecated. Use `absl::TimeZone::At(Time)`. + ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING + ABSL_DEPRECATED("Use `absl::TimeZone::At(Time)`.") + Breakdown In(TimeZone tz) const; + ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING + + template + friend H AbslHashValue(H h, Time t) + { + return H::combine(std::move(h), t.rep_); + } + + private: + friend constexpr Time time_internal::FromUnixDuration(Duration d); + friend constexpr Duration time_internal::ToUnixDuration(Time t); + friend constexpr bool operator<(Time lhs, Time rhs); + friend constexpr bool operator==(Time lhs, Time rhs); + friend Duration operator-(Time lhs, Time rhs); + friend constexpr Time UniversalEpoch(); + friend constexpr Time InfiniteFuture(); + friend constexpr Time InfinitePast(); + constexpr explicit Time(Duration rep) : + rep_(rep) + { + } + Duration rep_; + }; + + // Relational Operators + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Time lhs, Time rhs) + { + return lhs.rep_ < rhs.rep_; + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>(Time lhs, Time rhs) + { + return rhs < lhs; + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator>=(Time lhs, Time rhs) + { + return !(lhs < rhs); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<=(Time lhs, Time rhs) + { + return !(rhs < lhs); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Time lhs, Time rhs) + { + return lhs.rep_ == rhs.rep_; + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator!=(Time lhs, Time rhs) + { + return !(lhs == rhs); + } + + // Additive Operators + ABSL_ATTRIBUTE_CONST_FUNCTION inline Time operator+(Time lhs, Duration rhs) + { + return lhs += rhs; + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline Time operator+(Duration lhs, Time rhs) + { + return rhs += lhs; + } + ABSL_ATTRIBUTE_CONST_FUNCTION 
inline Time operator-(Time lhs, Duration rhs) + { + return lhs -= rhs; + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration operator-(Time lhs, Time rhs) + { + return lhs.rep_ - rhs.rep_; + } + + // UnixEpoch() + // + // Returns the `absl::Time` representing "1970-01-01 00:00:00.0 +0000". + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time UnixEpoch() + { + return Time(); + } + + // UniversalEpoch() + // + // Returns the `absl::Time` representing "0001-01-01 00:00:00.0 +0000", the + // epoch of the ICU Universal Time Scale. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time UniversalEpoch() + { + // 719162 is the number of days from 0001-01-01 to 1970-01-01, + // assuming the Gregorian calendar. + return Time( + time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, uint32_t{0}) + ); + } + + // InfiniteFuture() + // + // Returns an `absl::Time` that is infinitely far in the future. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time InfiniteFuture() + { + return Time(time_internal::MakeDuration((std::numeric_limits::max)(), ~uint32_t{0})); + } + + // InfinitePast() + // + // Returns an `absl::Time` that is infinitely far in the past. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time InfinitePast() + { + return Time(time_internal::MakeDuration((std::numeric_limits::min)(), ~uint32_t{0})); + } + + // FromUnixNanos() + // FromUnixMicros() + // FromUnixMillis() + // FromUnixSeconds() + // FromTimeT() + // FromUDate() + // FromUniversal() + // + // Creates an `absl::Time` from a variety of other representations. 
See + // https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixNanos(int64_t ns); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMicros(int64_t us); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMillis(int64_t ms); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixSeconds(int64_t s); + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromTimeT(time_t t); + ABSL_ATTRIBUTE_CONST_FUNCTION Time FromUDate(double udate); + ABSL_ATTRIBUTE_CONST_FUNCTION Time FromUniversal(int64_t universal); + + // ToUnixNanos() + // ToUnixMicros() + // ToUnixMillis() + // ToUnixSeconds() + // ToTimeT() + // ToUDate() + // ToUniversal() + // + // Converts an `absl::Time` to a variety of other representations. See + // https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html + // + // Note that these operations round down toward negative infinity where + // necessary to adjust to the resolution of the result type. Beware of + // possible time_t over/underflow in ToTime{T,val,spec}() on 32-bit platforms. + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixNanos(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMicros(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMillis(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixSeconds(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION time_t ToTimeT(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION double ToUDate(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUniversal(Time t); + + // DurationFromTimespec() + // DurationFromTimeval() + // ToTimespec() + // ToTimeval() + // TimeFromTimespec() + // TimeFromTimeval() + // ToTimespec() + // ToTimeval() + // + // Some APIs use a timespec or a timeval as a Duration (e.g., nanosleep(2) + // and select(2)), while others use them as a Time (e.g. clock_gettime(2) + // and gettimeofday(2)), so conversion functions are provided for both cases. 
+ // The "to timespec/val" direction is easily handled via overloading, but + // for "from timespec/val" the desired type is part of the function name. + ABSL_ATTRIBUTE_CONST_FUNCTION Duration DurationFromTimespec(timespec ts); + ABSL_ATTRIBUTE_CONST_FUNCTION Duration DurationFromTimeval(timeval tv); + ABSL_ATTRIBUTE_CONST_FUNCTION timespec ToTimespec(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION timeval ToTimeval(Duration d); + ABSL_ATTRIBUTE_CONST_FUNCTION Time TimeFromTimespec(timespec ts); + ABSL_ATTRIBUTE_CONST_FUNCTION Time TimeFromTimeval(timeval tv); + ABSL_ATTRIBUTE_CONST_FUNCTION timespec ToTimespec(Time t); + ABSL_ATTRIBUTE_CONST_FUNCTION timeval ToTimeval(Time t); + + // FromChrono() + // + // Converts a std::chrono::system_clock::time_point to an absl::Time. + // + // Example: + // + // auto tp = std::chrono::system_clock::from_time_t(123); + // absl::Time t = absl::FromChrono(tp); + // // t == absl::FromTimeT(123) + ABSL_ATTRIBUTE_PURE_FUNCTION Time + FromChrono(const std::chrono::system_clock::time_point& tp); + + // ToChronoTime() + // + // Converts an absl::Time to a std::chrono::system_clock::time_point. If + // overflow would occur, the returned value will saturate at the min/max time + // point value instead. + // + // Example: + // + // absl::Time t = absl::FromTimeT(123); + // auto tp = absl::ToChronoTime(t); + // // tp == std::chrono::system_clock::from_time_t(123); + ABSL_ATTRIBUTE_CONST_FUNCTION std::chrono::system_clock::time_point + ToChronoTime(Time); + + // AbslParseFlag() + // + // Parses the command-line flag string representation `text` into a Time value. + // Time flags must be specified in a format that matches absl::RFC3339_full. + // + // For example: + // + // --start_time=2016-01-02T03:04:05.678+08:00 + // + // Note: A UTC offset (or 'Z' indicating a zero-offset from UTC) is required. 
+ // + // Additionally, if you'd like to specify a time as a count of + // seconds/milliseconds/etc from the Unix epoch, use an absl::Duration flag + // and add that duration to absl::UnixEpoch() to get an absl::Time. + bool AbslParseFlag(absl::string_view text, Time* t, std::string* error); + + // AbslUnparseFlag() + // + // Unparses a Time value into a command-line string representation using + // the format specified by `absl::ParseTime()`. + std::string AbslUnparseFlag(Time t); + + ABSL_DEPRECATED("Use AbslParseFlag() instead.") + bool ParseFlag(const std::string& text, Time* t, std::string* error); + ABSL_DEPRECATED("Use AbslUnparseFlag() instead.") + std::string UnparseFlag(Time t); + + // TimeZone + // + // The `absl::TimeZone` is an opaque, small, value-type class representing a + // geo-political region within which particular rules are used for converting + // between absolute and civil times (see https://git.io/v59Ly). `absl::TimeZone` + // values are named using the TZ identifiers from the IANA Time Zone Database, + // such as "America/Los_Angeles" or "Australia/Sydney". `absl::TimeZone` values + // are created from factory functions such as `absl::LoadTimeZone()`. Note: + // strings like "PST" and "EDT" are not valid TZ identifiers. Prefer to pass by + // value rather than const reference. 
+ // + // For more on the fundamental concepts of time zones, absolute times, and civil + // times, see https://github.com/google/cctz#fundamental-concepts + // + // Examples: + // + // absl::TimeZone utc = absl::UTCTimeZone(); + // absl::TimeZone pst = absl::FixedTimeZone(-8 * 60 * 60); + // absl::TimeZone loc = absl::LocalTimeZone(); + // absl::TimeZone lax; + // if (!absl::LoadTimeZone("America/Los_Angeles", &lax)) { + // // handle error case + // } + // + // See also: + // - https://github.com/google/cctz + // - https://www.iana.org/time-zones + // - https://en.wikipedia.org/wiki/Zoneinfo + class TimeZone + { + public: + explicit TimeZone(time_internal::cctz::time_zone tz) : + cz_(tz) + { + } + TimeZone() = default; // UTC, but prefer UTCTimeZone() to be explicit. + + // Copyable. + TimeZone(const TimeZone&) = default; + TimeZone& operator=(const TimeZone&) = default; + + explicit operator time_internal::cctz::time_zone() const + { + return cz_; + } + + std::string name() const + { + return cz_.name(); + } + + // TimeZone::CivilInfo + // + // Information about the civil time corresponding to an absolute time. + // This struct is not intended to represent an instant in time. So, rather + // than passing a `TimeZone::CivilInfo` to a function, pass an `absl::Time` + // and an `absl::TimeZone`. + struct CivilInfo + { + CivilSecond cs; + Duration subsecond; + + // Note: The following fields exist for backward compatibility + // with older APIs. Accessing these fields directly is a sign of + // imprudent logic in the calling code. Modern time-related code + // should only access this data indirectly by way of FormatTime(). + // These fields are undefined for InfiniteFuture() and InfinitePast(). + int offset; // seconds east of UTC + bool is_dst; // is offset non-standard? + const char* zone_abbr; // time-zone abbreviation (e.g., "PST") + }; + + // TimeZone::At(Time) + // + // Returns the civil time for this TimeZone at a certain `absl::Time`. 
+ // If the input time is infinite, the output civil second will be set to + // CivilSecond::max() or min(), and the subsecond will be infinite. + // + // Example: + // + // const auto epoch = lax.At(absl::UnixEpoch()); + // // epoch.cs == 1969-12-31 16:00:00 + // // epoch.subsecond == absl::ZeroDuration() + // // epoch.offset == -28800 + // // epoch.is_dst == false + // // epoch.abbr == "PST" + CivilInfo At(Time t) const; + + // TimeZone::TimeInfo + // + // Information about the absolute times corresponding to a civil time. + // (Subseconds must be handled separately.) + // + // It is possible for a caller to pass a civil-time value that does + // not represent an actual or unique instant in time (due to a shift + // in UTC offset in the TimeZone, which results in a discontinuity in + // the civil-time components). For example, a daylight-saving-time + // transition skips or repeats civil times---in the United States, + // March 13, 2011 02:15 never occurred, while November 6, 2011 01:15 + // occurred twice---so requests for such times are not well-defined. + // To account for these possibilities, `absl::TimeZone::TimeInfo` is + // richer than just a single `absl::Time`. + struct TimeInfo + { + enum CivilKind + { + UNIQUE, // the civil time was singular (pre == trans == post) + SKIPPED, // the civil time did not exist (pre >= trans > post) + REPEATED, // the civil time was ambiguous (pre < trans <= post) + } kind; + Time pre; // time calculated using the pre-transition offset + Time trans; // when the civil-time discontinuity occurred + Time post; // time calculated using the post-transition offset + }; + + // TimeZone::At(CivilSecond) + // + // Returns an `absl::TimeInfo` containing the absolute time(s) for this + // TimeZone at an `absl::CivilSecond`. When the civil time is skipped or + // repeated, returns times calculated using the pre-transition and post- + // transition UTC offsets, plus the transition time itself. 
+ // + // Examples: + // + // // A unique civil time + // const auto jan01 = lax.At(absl::CivilSecond(2011, 1, 1, 0, 0, 0)); + // // jan01.kind == TimeZone::TimeInfo::UNIQUE + // // jan01.pre is 2011-01-01 00:00:00 -0800 + // // jan01.trans is 2011-01-01 00:00:00 -0800 + // // jan01.post is 2011-01-01 00:00:00 -0800 + // + // // A Spring DST transition, when there is a gap in civil time + // const auto mar13 = lax.At(absl::CivilSecond(2011, 3, 13, 2, 15, 0)); + // // mar13.kind == TimeZone::TimeInfo::SKIPPED + // // mar13.pre is 2011-03-13 03:15:00 -0700 + // // mar13.trans is 2011-03-13 03:00:00 -0700 + // // mar13.post is 2011-03-13 01:15:00 -0800 + // + // // A Fall DST transition, when civil times are repeated + // const auto nov06 = lax.At(absl::CivilSecond(2011, 11, 6, 1, 15, 0)); + // // nov06.kind == TimeZone::TimeInfo::REPEATED + // // nov06.pre is 2011-11-06 01:15:00 -0700 + // // nov06.trans is 2011-11-06 01:00:00 -0800 + // // nov06.post is 2011-11-06 01:15:00 -0800 + TimeInfo At(CivilSecond ct) const; + + // TimeZone::NextTransition() + // TimeZone::PrevTransition() + // + // Finds the time of the next/previous offset change in this time zone. + // + // By definition, `NextTransition(t, &trans)` returns false when `t` is + // `InfiniteFuture()`, and `PrevTransition(t, &trans)` returns false + // when `t` is `InfinitePast()`. If the zone has no transitions, the + // result will also be false no matter what the argument. + // + // Otherwise, when `t` is `InfinitePast()`, `NextTransition(t, &trans)` + // returns true and sets `trans` to the first recorded transition. Chains + // of calls to `NextTransition()/PrevTransition()` will eventually return + // false, but it is unspecified exactly when `NextTransition(t, &trans)` + // jumps to false, or what time is set by `PrevTransition(t, &trans)` for + // a very distant `t`. + // + // Note: Enumeration of time-zone transitions is for informational purposes + // only. 
Modern time-related code should not care about when offset changes + // occur. + // + // Example: + // absl::TimeZone nyc; + // if (!absl::LoadTimeZone("America/New_York", &nyc)) { ... } + // const auto now = absl::Now(); + // auto t = absl::InfinitePast(); + // absl::TimeZone::CivilTransition trans; + // while (t <= now && nyc.NextTransition(t, &trans)) { + // // transition: trans.from -> trans.to + // t = nyc.At(trans.to).trans; + // } + struct CivilTransition + { + CivilSecond from; // the civil time we jump from + CivilSecond to; // the civil time we jump to + }; + bool NextTransition(Time t, CivilTransition* trans) const; + bool PrevTransition(Time t, CivilTransition* trans) const; + + template + friend H AbslHashValue(H h, TimeZone tz) + { + return H::combine(std::move(h), tz.cz_); + } + + private: + friend bool operator==(TimeZone a, TimeZone b) + { + return a.cz_ == b.cz_; + } + friend bool operator!=(TimeZone a, TimeZone b) + { + return a.cz_ != b.cz_; + } + friend std::ostream& operator<<(std::ostream& os, TimeZone tz) + { + return os << tz.name(); + } + + time_internal::cctz::time_zone cz_; + }; + + // LoadTimeZone() + // + // Loads the named zone. May perform I/O on the initial load of the named + // zone. If the name is invalid, or some other kind of error occurs, returns + // `false` and `*tz` is set to the UTC time zone. + inline bool LoadTimeZone(absl::string_view name, TimeZone* tz) + { + if (name == "localtime") + { + *tz = TimeZone(time_internal::cctz::local_time_zone()); + return true; + } + time_internal::cctz::time_zone cz; + const bool b = time_internal::cctz::load_time_zone(std::string(name), &cz); + *tz = TimeZone(cz); + return b; + } + + // FixedTimeZone() + // + // Returns a TimeZone that is a fixed offset (seconds east) from UTC. + // Note: If the absolute value of the offset is greater than 24 hours + // you'll get UTC (i.e., no offset) instead. 
+ inline TimeZone FixedTimeZone(int seconds) + { + return TimeZone( + time_internal::cctz::fixed_time_zone(std::chrono::seconds(seconds)) + ); + } + + // UTCTimeZone() + // + // Convenience method returning the UTC time zone. + inline TimeZone UTCTimeZone() + { + return TimeZone(time_internal::cctz::utc_time_zone()); + } + + // LocalTimeZone() + // + // Convenience method returning the local time zone, or UTC if there is + // no configured local zone. Warning: Be wary of using LocalTimeZone(), + // and particularly so in a server process, as the zone configured for the + // local machine should be irrelevant. Prefer an explicit zone name. + inline TimeZone LocalTimeZone() + { + return TimeZone(time_internal::cctz::local_time_zone()); + } + + // ToCivilSecond() + // ToCivilMinute() + // ToCivilHour() + // ToCivilDay() + // ToCivilMonth() + // ToCivilYear() + // + // Helpers for TimeZone::At(Time) to return particularly aligned civil times. + // + // Example: + // + // absl::Time t = ...; + // absl::TimeZone tz = ...; + // const auto cd = absl::ToCivilDay(t, tz); + ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilSecond ToCivilSecond(Time t, TimeZone tz) + { + return tz.At(t).cs; // already a CivilSecond + } + ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilMinute ToCivilMinute(Time t, TimeZone tz) + { + return CivilMinute(tz.At(t).cs); + } + ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilHour ToCivilHour(Time t, TimeZone tz) + { + return CivilHour(tz.At(t).cs); + } + ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilDay ToCivilDay(Time t, TimeZone tz) + { + return CivilDay(tz.At(t).cs); + } + ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilMonth ToCivilMonth(Time t, TimeZone tz) + { + return CivilMonth(tz.At(t).cs); + } + ABSL_ATTRIBUTE_PURE_FUNCTION inline CivilYear ToCivilYear(Time t, TimeZone tz) + { + return CivilYear(tz.At(t).cs); + } + + // FromCivil() + // + // Helper for TimeZone::At(CivilSecond) that provides "order-preserving + // semantics." 
If the civil time maps to a unique time, that time is + // returned. If the civil time is repeated in the given time zone, the + // time using the pre-transition offset is returned. Otherwise, the + // civil time is skipped in the given time zone, and the transition time + // is returned. This means that for any two civil times, ct1 and ct2, + // (ct1 < ct2) => (FromCivil(ct1) <= FromCivil(ct2)), the equal case + // being when two non-existent civil times map to the same transition time. + // + // Note: Accepts civil times of any alignment. + ABSL_ATTRIBUTE_PURE_FUNCTION inline Time FromCivil(CivilSecond ct, TimeZone tz) + { + const auto ti = tz.At(ct); + if (ti.kind == TimeZone::TimeInfo::SKIPPED) + return ti.trans; + return ti.pre; + } + + // TimeConversion + // + // An `absl::TimeConversion` represents the conversion of year, month, day, + // hour, minute, and second values (i.e., a civil time), in a particular + // `absl::TimeZone`, to a time instant (an absolute time), as returned by + // `absl::ConvertDateTime()`. Legacy version of `absl::TimeZone::TimeInfo`. + // + // Deprecated. Use `absl::TimeZone::TimeInfo`. + struct ABSL_DEPRECATED("Use `absl::TimeZone::TimeInfo`.") TimeConversion + { + Time pre; // time calculated using the pre-transition offset + Time trans; // when the civil-time discontinuity occurred + Time post; // time calculated using the post-transition offset + + enum Kind + { + UNIQUE, // the civil time was singular (pre == trans == post) + SKIPPED, // the civil time did not exist + REPEATED, // the civil time was ambiguous + }; + Kind kind; + + bool normalized; // input values were outside their valid ranges + }; + + // ConvertDateTime() + // + // Legacy version of `absl::TimeZone::At(absl::CivilSecond)` that takes + // the civil time as six, separate values (YMDHMS). + // + // The input month, day, hour, minute, and second values can be outside + // of their valid ranges, in which case they will be "normalized" during + // the conversion. 
+ // + // Example: + // + // // "October 32" normalizes to "November 1". + // absl::TimeConversion tc = + // absl::ConvertDateTime(2013, 10, 32, 8, 30, 0, lax); + // // tc.kind == TimeConversion::UNIQUE && tc.normalized == true + // // absl::ToCivilDay(tc.pre, tz).month() == 11 + // // absl::ToCivilDay(tc.pre, tz).day() == 1 + // + // Deprecated. Use `absl::TimeZone::At(CivilSecond)`. + ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING + ABSL_DEPRECATED("Use `absl::TimeZone::At(CivilSecond)`.") + TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour, int min, int sec, TimeZone tz); + ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING + + // FromDateTime() + // + // A convenience wrapper for `absl::ConvertDateTime()` that simply returns + // the "pre" `absl::Time`. That is, the unique result, or the instant that + // is correct using the pre-transition offset (as if the transition never + // happened). + // + // Example: + // + // absl::Time t = absl::FromDateTime(2017, 9, 26, 9, 30, 0, lax); + // // t = 2017-09-26 09:30:00 -0700 + // + // Deprecated. Use `absl::FromCivil(CivilSecond, TimeZone)`. Note that the + // behavior of `FromCivil()` differs from `FromDateTime()` for skipped civil + // times. If you care about that see `absl::TimeZone::At(absl::CivilSecond)`. + ABSL_DEPRECATED("Use `absl::FromCivil(CivilSecond, TimeZone)`.") + inline Time FromDateTime(int64_t year, int mon, int day, int hour, int min, int sec, TimeZone tz) + { + ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING + return ConvertDateTime(year, mon, day, hour, min, sec, tz).pre; + ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING + } + + // FromTM() + // + // Converts the `tm_year`, `tm_mon`, `tm_mday`, `tm_hour`, `tm_min`, and + // `tm_sec` fields to an `absl::Time` using the given time zone. See ctime(3) + // for a description of the expected values of the tm fields. 
If the civil time + // is unique (see `absl::TimeZone::At(absl::CivilSecond)` above), the matching + // time instant is returned. Otherwise, the `tm_isdst` field is consulted to + // choose between the possible results. For a repeated civil time, `tm_isdst != + // 0` returns the matching DST instant, while `tm_isdst == 0` returns the + // matching non-DST instant. For a skipped civil time there is no matching + // instant, so `tm_isdst != 0` returns the DST instant, and `tm_isdst == 0` + // returns the non-DST instant, that would have matched if the transition never + // happened. + ABSL_ATTRIBUTE_PURE_FUNCTION Time FromTM(const struct tm& tm, TimeZone tz); + + // ToTM() + // + // Converts the given `absl::Time` to a struct tm using the given time zone. + // See ctime(3) for a description of the values of the tm fields. + ABSL_ATTRIBUTE_PURE_FUNCTION struct tm ToTM(Time t, TimeZone tz); + + // RFC3339_full + // RFC3339_sec + // + // FormatTime()/ParseTime() format specifiers for RFC3339 date/time strings, + // with trailing zeros trimmed or with fractional seconds omitted altogether. + // + // Note that RFC3339_sec[] matches an ISO 8601 extended format for date and + // time with UTC offset. Also note the use of "%Y": RFC3339 mandates that + // years have exactly four digits, but we allow them to take their natural + // width. + ABSL_DLL extern const char RFC3339_full[]; // %Y-%m-%d%ET%H:%M:%E*S%Ez + ABSL_DLL extern const char RFC3339_sec[]; // %Y-%m-%d%ET%H:%M:%S%Ez + + // RFC1123_full + // RFC1123_no_wday + // + // FormatTime()/ParseTime() format specifiers for RFC1123 date/time strings. + ABSL_DLL extern const char RFC1123_full[]; // %a, %d %b %E4Y %H:%M:%S %z + ABSL_DLL extern const char RFC1123_no_wday[]; // %d %b %E4Y %H:%M:%S %z + + // FormatTime() + // + // Formats the given `absl::Time` in the `absl::TimeZone` according to the + // provided format string. 
Uses strftime()-like formatting options, with + // the following extensions: + // + // - %Ez - RFC3339-compatible numeric UTC offset (+hh:mm or -hh:mm) + // - %E*z - Full-resolution numeric UTC offset (+hh:mm:ss or -hh:mm:ss) + // - %E#S - Seconds with # digits of fractional precision + // - %E*S - Seconds with full fractional precision (a literal '*') + // - %E#f - Fractional seconds with # digits of precision + // - %E*f - Fractional seconds with full precision (a literal '*') + // - %E4Y - Four-character years (-999 ... -001, 0000, 0001 ... 9999) + // - %ET - The RFC3339 "date-time" separator "T" + // + // Note that %E0S behaves like %S, and %E0f produces no characters. In + // contrast %E*f always produces at least one digit, which may be '0'. + // + // Note that %Y produces as many characters as it takes to fully render the + // year. A year outside of [-999:9999] when formatted with %E4Y will produce + // more than four characters, just like %Y. + // + // We recommend that format strings include the UTC offset (%z, %Ez, or %E*z) + // so that the result uniquely identifies a time instant. + // + // Example: + // + // absl::CivilSecond cs(2013, 1, 2, 3, 4, 5); + // absl::Time t = absl::FromCivil(cs, lax); + // std::string f = absl::FormatTime("%H:%M:%S", t, lax); // "03:04:05" + // f = absl::FormatTime("%H:%M:%E3S", t, lax); // "03:04:05.000" + // + // Note: If the given `absl::Time` is `absl::InfiniteFuture()`, the returned + // string will be exactly "infinite-future". If the given `absl::Time` is + // `absl::InfinitePast()`, the returned string will be exactly "infinite-past". + // In both cases the given format string and `absl::TimeZone` are ignored. + // + ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(absl::string_view format, Time t, TimeZone tz); + + // Convenience functions that format the given time using the RFC3339_full + // format. The first overload uses the provided TimeZone, while the second + // uses LocalTimeZone(). 
+ ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(Time t, TimeZone tz); + ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(Time t); + + // Output stream operator. + inline std::ostream& operator<<(std::ostream& os, Time t) + { + return os << FormatTime(t); + } + + // Support for StrFormat(), StrCat() etc. + template + void AbslStringify(Sink& sink, Time t) + { + sink.Append(FormatTime(t)); + } + + // ParseTime() + // + // Parses an input string according to the provided format string and + // returns the corresponding `absl::Time`. Uses strftime()-like formatting + // options, with the same extensions as FormatTime(), but with the + // exceptions that %E#S is interpreted as %E*S, and %E#f as %E*f. %Ez + // and %E*z also accept the same inputs, which (along with %z) includes + // 'z' and 'Z' as synonyms for +00:00. %ET accepts either 'T' or 't'. + // + // %Y consumes as many numeric characters as it can, so the matching data + // should always be terminated with a non-numeric. %E4Y always consumes + // exactly four characters, including any sign. + // + // Unspecified fields are taken from the default date and time of ... + // + // "1970-01-01 00:00:00.0 +0000" + // + // For example, parsing a string of "15:45" (%H:%M) will return an absl::Time + // that represents "1970-01-01 15:45:00.0 +0000". + // + // Note that since ParseTime() returns time instants, it makes the most sense + // to parse fully-specified date/time strings that include a UTC offset (%z, + // %Ez, or %E*z). + // + // Note also that `absl::ParseTime()` only heeds the fields year, month, day, + // hour, minute, (fractional) second, and UTC offset. Other fields, like + // weekday (%a or %A), while parsed for syntactic validity, are ignored + // in the conversion. + // + // Date and time fields that are out-of-range will be treated as errors + // rather than normalizing them like `absl::CivilSecond` does. 
For example, + // it is an error to parse the date "Oct 32, 2013" because 32 is out of range. + // + // A leap second of ":60" is normalized to ":00" of the following minute + // with fractional seconds discarded. The following table shows how the + // given seconds and subseconds will be parsed: + // + // "59.x" -> 59.x // exact + // "60.x" -> 00.0 // normalized + // "00.x" -> 00.x // exact + // + // Errors are indicated by returning false and assigning an error message + // to the "err" out param if it is non-null. + // + // Note: If the input string is exactly "infinite-future", the returned + // `absl::Time` will be `absl::InfiniteFuture()` and `true` will be returned. + // If the input string is "infinite-past", the returned `absl::Time` will be + // `absl::InfinitePast()` and `true` will be returned. + // + bool ParseTime(absl::string_view format, absl::string_view input, Time* time, std::string* err); + + // Like ParseTime() above, but if the format string does not contain a UTC + // offset specification (%z/%Ez/%E*z) then the input is interpreted in the + // given TimeZone. This means that the input, by itself, does not identify a + // unique instant. Being time-zone dependent, it also admits the possibility + // of ambiguity or non-existence, in which case the "pre" time (as defined + // by TimeZone::TimeInfo) is returned. For these reasons we recommend that + // all date/time strings include a UTC offset so they're context independent. + bool ParseTime(absl::string_view format, absl::string_view input, TimeZone tz, Time* time, std::string* err); + + // ============================================================================ + // Implementation Details Follow + // ============================================================================ + + namespace time_internal + { + + // Creates a Duration with a given representation. + // REQUIRES: hi,lo is a valid representation of a Duration as specified + // in time/duration.cc. 
+ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi, uint32_t lo = 0) + { + return Duration(hi, lo); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeDuration(int64_t hi, int64_t lo) + { + return MakeDuration(hi, static_cast(lo)); + } + + // Make a Duration value from a floating-point number, as long as that number + // is in the range [ 0 .. numeric_limits::max ), that is, as long as + // it's positive and can be converted to int64_t without risk of UB. + ABSL_ATTRIBUTE_CONST_FUNCTION inline Duration MakePosDoubleDuration(double n) + { + const int64_t int_secs = static_cast(n); + const uint32_t ticks = static_cast( + std::round((n - static_cast(int_secs)) * kTicksPerSecond) + ); + return ticks < kTicksPerSecond ? MakeDuration(int_secs, ticks) : MakeDuration(int_secs + 1, ticks - kTicksPerSecond); + } + + // Creates a normalized Duration from an almost-normalized (sec,ticks) + // pair. sec may be positive or negative. ticks must be in the range + // -kTicksPerSecond < *ticks < kTicksPerSecond. If ticks is negative it + // will be normalized to a positive value in the resulting Duration. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeNormalizedDuration( + int64_t sec, int64_t ticks + ) + { + return (ticks < 0) ? MakeDuration(sec - 1, ticks + kTicksPerSecond) : MakeDuration(sec, ticks); + } + + // Provide access to the Duration representation. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d) + { + return d.rep_hi_.Get(); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr uint32_t GetRepLo(Duration d) + { + return d.rep_lo_; + } + + // Returns true iff d is positive or negative infinity. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool IsInfiniteDuration(Duration d) + { + return GetRepLo(d) == ~uint32_t{0}; + } + + // Returns an infinite Duration with the opposite sign. 
+ // REQUIRES: IsInfiniteDuration(d) + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration OppositeInfinity(Duration d) + { + return GetRepHi(d) < 0 ? MakeDuration((std::numeric_limits::max)(), ~uint32_t{0}) : MakeDuration((std::numeric_limits::min)(), ~uint32_t{0}); + } + + // Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t NegateAndSubtractOne( + int64_t n + ) + { + // Note: Good compilers will optimize this expression to ~n when using + // a two's-complement representation (which is required for int64_t). + return (n < 0) ? -(n + 1) : (-n) - 1; + } + + // Map between a Time and a Duration since the Unix epoch. Note that these + // functions depend on the above mentioned choice of the Unix epoch for the + // Time representation (and both need to be Time friends). Without this + // knowledge, we would need to add-in/subtract-out UnixEpoch() respectively. + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixDuration(Duration d) + { + return Time(d); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration ToUnixDuration(Time t) + { + return t.rep_; + } + + template + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v, std::ratio<1, N>) + { + static_assert(0 < N && N <= 1000 * 1000 * 1000, "Unsupported ratio"); + // Subsecond ratios cannot overflow. + return MakeNormalizedDuration( + v / N, v % N * kTicksPerNanosecond * 1000 * 1000 * 1000 / N + ); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v, std::ratio<60>) + { + return (v <= (std::numeric_limits::max)() / 60 && + v >= (std::numeric_limits::min)() / 60) ? + MakeDuration(v * 60) : + v > 0 ? InfiniteDuration() : + -InfiniteDuration(); + } + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration FromInt64(int64_t v, std::ratio<3600>) + { + return (v <= (std::numeric_limits::max)() / 3600 && + v >= (std::numeric_limits::min)() / 3600) ? + MakeDuration(v * 3600) : + v > 0 ? 
InfiniteDuration() : + -InfiniteDuration(); + } + + // IsValidRep64(0) is true if the expression `int64_t{std::declval()}` is + // valid. That is, if a T can be assigned to an int64_t without narrowing. + template + constexpr auto IsValidRep64(int) -> decltype(int64_t{std::declval()} == 0) + { + return true; + } + template + constexpr auto IsValidRep64(char) -> bool + { + return false; + } + + // Converts a std::chrono::duration to an absl::Duration. + template + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::duration& d + ) + { + static_assert(IsValidRep64(0), "duration::rep is invalid"); + return FromInt64(int64_t{d.count()}, Period{}); + } + + template + ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64(Duration d, Ratio) + { + // Note: This may be used on MSVC, which may have a system_clock period of + // std::ratio<1, 10 * 1000 * 1000> + return ToInt64Seconds(d * Ratio::den / Ratio::num); + } + // Fastpath implementations for the 6 common duration units. + ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::nano) + { + return ToInt64Nanoseconds(d); + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::micro) + { + return ToInt64Microseconds(d); + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::milli) + { + return ToInt64Milliseconds(d); + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::ratio<1>) + { + return ToInt64Seconds(d); + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::ratio<60>) + { + return ToInt64Minutes(d); + } + ABSL_ATTRIBUTE_CONST_FUNCTION inline int64_t ToInt64(Duration d, std::ratio<3600>) + { + return ToInt64Hours(d); + } + + // Converts an absl::Duration to a chrono duration of type T. 
+ template + ABSL_ATTRIBUTE_CONST_FUNCTION T ToChronoDuration(Duration d) + { + using Rep = typename T::rep; + using Period = typename T::period; + static_assert(IsValidRep64(0), "duration::rep is invalid"); + if (time_internal::IsInfiniteDuration(d)) + return d < ZeroDuration() ? (T::min)() : (T::max)(); + const auto v = ToInt64(d, Period{}); + if (v > (std::numeric_limits::max)()) + return (T::max)(); + if (v < (std::numeric_limits::min)()) + return (T::min)(); + return T{v}; + } + + } // namespace time_internal + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator<(Duration lhs, Duration rhs) + { + return time_internal::GetRepHi(lhs) != time_internal::GetRepHi(rhs) ? time_internal::GetRepHi(lhs) < time_internal::GetRepHi(rhs) : time_internal::GetRepHi(lhs) == (std::numeric_limits::min)() ? time_internal::GetRepLo(lhs) + 1 < time_internal::GetRepLo(rhs) + 1 : + time_internal::GetRepLo(lhs) < time_internal::GetRepLo(rhs); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr bool operator==(Duration lhs, Duration rhs) + { + return time_internal::GetRepHi(lhs) == time_internal::GetRepHi(rhs) && + time_internal::GetRepLo(lhs) == time_internal::GetRepLo(rhs); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration operator-(Duration d) + { + // This is a little interesting because of the special cases. + // + // If rep_lo_ is zero, we have it easy; it's safe to negate rep_hi_, we're + // dealing with an integral number of seconds, and the only special case is + // the maximum negative finite duration, which can't be negated. + // + // Infinities stay infinite, and just change direction. + // + // Finally we're in the case where rep_lo_ is non-zero, and we can borrow + // a second's worth of ticks and avoid overflow (as negating int64_t-min + 1 + // is safe). + return time_internal::GetRepLo(d) == 0 ? time_internal::GetRepHi(d) == + (std::numeric_limits::min)() ? 
+ InfiniteDuration() : + time_internal::MakeDuration(-time_internal::GetRepHi(d)) : + time_internal::IsInfiniteDuration(d) ? time_internal::OppositeInfinity(d) : + time_internal::MakeDuration( + time_internal::NegateAndSubtractOne( + time_internal::GetRepHi(d) + ), + time_internal::kTicksPerSecond - + time_internal::GetRepLo(d) + ); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration InfiniteDuration() + { + return time_internal::MakeDuration((std::numeric_limits::max)(), ~uint32_t{0}); + } + + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::nanoseconds& d + ) + { + return time_internal::FromChrono(d); + } + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::microseconds& d + ) + { + return time_internal::FromChrono(d); + } + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::milliseconds& d + ) + { + return time_internal::FromChrono(d); + } + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::seconds& d + ) + { + return time_internal::FromChrono(d); + } + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::minutes& d + ) + { + return time_internal::FromChrono(d); + } + ABSL_ATTRIBUTE_PURE_FUNCTION constexpr Duration FromChrono( + const std::chrono::hours& d + ) + { + return time_internal::FromChrono(d); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixNanos(int64_t ns) + { + return time_internal::FromUnixDuration(Nanoseconds(ns)); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMicros(int64_t us) + { + return time_internal::FromUnixDuration(Microseconds(us)); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMillis(int64_t ms) + { + return time_internal::FromUnixDuration(Milliseconds(ms)); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixSeconds(int64_t s) + { + return time_internal::FromUnixDuration(Seconds(s)); + } + + ABSL_ATTRIBUTE_CONST_FUNCTION constexpr 
Time FromTimeT(time_t t) + { + return time_internal::FromUnixDuration(Seconds(t)); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_TIME_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/any.h b/CAPI/cpp/grpc/include/absl/types/any.h new file mode 100644 index 00000000..894e25aa --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/any.h @@ -0,0 +1,571 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// any.h +// ----------------------------------------------------------------------------- +// +// This header file define the `absl::any` type for holding a type-safe value +// of any type. The 'absl::any` type is useful for providing a way to hold +// something that is, as yet, unspecified. Such unspecified types +// traditionally are passed between API boundaries until they are later cast to +// their "destination" types. To cast to such a destination type, use +// `absl::any_cast()`. Note that when casting an `absl::any`, you must cast it +// to an explicit type; implicit conversions will throw. 
+// +// Example: +// +// auto a = absl::any(65); +// absl::any_cast(a); // 65 +// absl::any_cast(a); // throws absl::bad_any_cast +// absl::any_cast(a); // throws absl::bad_any_cast +// +// `absl::any` is a C++11 compatible version of the C++17 `std::any` abstraction +// and is designed to be a drop-in replacement for code compliant with C++17. +// +// Traditionally, the behavior of casting to a temporary unspecified type has +// been accomplished with the `void *` paradigm, where the pointer was to some +// other unspecified type. `absl::any` provides an "owning" version of `void *` +// that avoids issues of pointer management. +// +// Note: just as in the case of `void *`, use of `absl::any` (and its C++17 +// version `std::any`) is a code smell indicating that your API might not be +// constructed correctly. We have seen that most uses of `any` are unwarranted, +// and `absl::any`, like `std::any`, is difficult to use properly. Before using +// this abstraction, make sure that you should not instead be rewriting your +// code to be more specific. +// +// Abseil has also released an `absl::variant` type (a C++11 compatible version +// of the C++17 `std::variant`), which is generally preferred for use over +// `absl::any`. +#ifndef ABSL_TYPES_ANY_H_ +#define ABSL_TYPES_ANY_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_USES_STD_ANY + +#include // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + using std::any; + using std::any_cast; + using std::bad_any_cast; + using std::make_any; + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_USES_STD_ANY + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/fast_type_id.h" +#include "absl/meta/type_traits.h" +#include "absl/types/bad_any_cast.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class any; + + // swap() + // + // Swaps two `absl::any` values. 
Equivalent to `x.swap(y) where `x` and `y` are + // `absl::any` types. + void swap(any& x, any& y) noexcept; + + // make_any() + // + // Constructs an `absl::any` of type `T` with the given arguments. + template + any make_any(Args&&... args); + + // Overload of `absl::make_any()` for constructing an `absl::any` type from an + // initializer list. + template + any make_any(std::initializer_list il, Args&&... args); + + // any_cast() + // + // Statically casts the value of a `const absl::any` type to the given type. + // This function will throw `absl::bad_any_cast` if the stored value type of the + // `absl::any` does not match the cast. + // + // `any_cast()` can also be used to get a reference to the internal storage iff + // a reference type is passed as its `ValueType`: + // + // Example: + // + // absl::any my_any = std::vector(); + // absl::any_cast&>(my_any).push_back(42); + template + ValueType any_cast(const any& operand); + + // Overload of `any_cast()` to statically cast the value of a non-const + // `absl::any` type to the given type. This function will throw + // `absl::bad_any_cast` if the stored value type of the `absl::any` does not + // match the cast. + template + ValueType any_cast(any& operand); // NOLINT(runtime/references) + + // Overload of `any_cast()` to statically cast the rvalue of an `absl::any` + // type. This function will throw `absl::bad_any_cast` if the stored value type + // of the `absl::any` does not match the cast. + template + ValueType any_cast(any&& operand); + + // Overload of `any_cast()` to statically cast the value of a const pointer + // `absl::any` type to the given pointer type, or `nullptr` if the stored value + // type of the `absl::any` does not match the cast. 
+ template + const ValueType* any_cast(const any* operand) noexcept; + + // Overload of `any_cast()` to statically cast the value of a pointer + // `absl::any` type to the given pointer type, or `nullptr` if the stored value + // type of the `absl::any` does not match the cast. + template + ValueType* any_cast(any* operand) noexcept; + + // ----------------------------------------------------------------------------- + // absl::any + // ----------------------------------------------------------------------------- + // + // An `absl::any` object provides the facility to either store an instance of a + // type, known as the "contained object", or no value. An `absl::any` is used to + // store values of types that are unknown at compile time. The `absl::any` + // object, when containing a value, must contain a value type; storing a + // reference type is neither desired nor supported. + // + // An `absl::any` can only store a type that is copy-constructible; move-only + // types are not allowed within an `any` object. + // + // Example: + // + // auto a = absl::any(65); // Literal, copyable + // auto b = absl::any(std::vector()); // Default-initialized, copyable + // std::unique_ptr my_foo; + // auto c = absl::any(std::move(my_foo)); // Error, not copy-constructible + // + // Note that `absl::any` makes use of decayed types (`absl::decay_t` in this + // context) to remove const-volatile qualifiers (known as "cv qualifiers"), + // decay functions to function pointers, etc. We essentially "decay" a given + // type into its essential type. + // + // `absl::any` makes use of decayed types when determining the basic type `T` of + // the value to store in the any's contained object. In the documentation below, + // we explicitly denote this by using the phrase "a decayed type of `T`". + // + // Example: + // + // const int a = 4; + // absl::any foo(a); // Decay ensures we store an "int", not a "const int&". 
+ // + // void my_function() {} + // absl::any bar(my_function); // Decay ensures we store a function pointer. + // + // `absl::any` is a C++11 compatible version of the C++17 `std::any` abstraction + // and is designed to be a drop-in replacement for code compliant with C++17. + class any + { + private: + template + struct IsInPlaceType; + + public: + // Constructors + + // Constructs an empty `absl::any` object (`any::has_value()` will return + // `false`). + constexpr any() noexcept; + + // Copy constructs an `absl::any` object with a "contained object" of the + // passed type of `other` (or an empty `absl::any` if `other.has_value()` is + // `false`. + any(const any& other) : + obj_(other.has_value() ? other.obj_->Clone() : std::unique_ptr()) + { + } + + // Move constructs an `absl::any` object with a "contained object" of the + // passed type of `other` (or an empty `absl::any` if `other.has_value()` is + // `false`). + any(any&& other) noexcept = default; + + // Constructs an `absl::any` object with a "contained object" of the decayed + // type of `T`, which is initialized via `std::forward(value)`. + // + // This constructor will not participate in overload resolution if the + // decayed type of `T` is not copy-constructible. + template< + typename T, + typename VT = absl::decay_t, + absl::enable_if_t, + IsInPlaceType, + absl::negation>>::value>* = nullptr> + any(T&& value) : + obj_(new Obj(in_place, std::forward(value))) + { + } + + // Constructs an `absl::any` object with a "contained object" of the decayed + // type of `T`, which is initialized via `std::forward(value)`. + template, absl::enable_if_t, std::is_constructible>::value>* = nullptr> + explicit any(in_place_type_t /*tag*/, Args&&... args) : + obj_(new Obj(in_place, std::forward(args)...)) + { + } + + // Constructs an `absl::any` object with a "contained object" of the passed + // type `VT` as a decayed type of `T`. 
`VT` is initialized as if + // direct-non-list-initializing an object of type `VT` with the arguments + // `initializer_list, std::forward(args)...`. + template< + typename T, + typename U, + typename... Args, + typename VT = absl::decay_t, + absl::enable_if_t< + absl::conjunction, std::is_constructible&, Args...>>::value>* = nullptr> + explicit any(in_place_type_t /*tag*/, std::initializer_list ilist, Args&&... args) : + obj_(new Obj(in_place, ilist, std::forward(args)...)) + { + } + + // Assignment operators + + // Copy assigns an `absl::any` object with a "contained object" of the + // passed type. + any& operator=(const any& rhs) + { + any(rhs).swap(*this); + return *this; + } + + // Move assigns an `absl::any` object with a "contained object" of the + // passed type. `rhs` is left in a valid but otherwise unspecified state. + any& operator=(any&& rhs) noexcept + { + any(std::move(rhs)).swap(*this); + return *this; + } + + // Assigns an `absl::any` object with a "contained object" of the passed type. + template, absl::enable_if_t>, std::is_copy_constructible>::value>* = nullptr> + any& operator=(T&& rhs) + { + any tmp(in_place_type_t(), std::forward(rhs)); + tmp.swap(*this); + return *this; + } + + // Modifiers + + // any::emplace() + // + // Emplaces a value within an `absl::any` object by calling `any::reset()`, + // initializing the contained value as if direct-non-list-initializing an + // object of type `VT` with the arguments `std::forward(args)...`, and + // returning a reference to the new contained value. + // + // Note: If an exception is thrown during the call to `VT`'s constructor, + // `*this` does not contain a value, and any previously contained value has + // been destroyed. + template< + typename T, + typename... Args, + typename VT = absl::decay_t, + absl::enable_if_t::value && std::is_constructible::value>* = nullptr> + VT& emplace(Args&&... 
args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + reset(); // NOTE: reset() is required here even in the world of exceptions. + Obj* const object_ptr = + new Obj(in_place, std::forward(args)...); + obj_ = std::unique_ptr(object_ptr); + return object_ptr->value; + } + + // Overload of `any::emplace()` to emplace a value within an `absl::any` + // object by calling `any::reset()`, initializing the contained value as if + // direct-non-list-initializing an object of type `VT` with the arguments + // `initializer_list, std::forward(args)...`, and returning a reference + // to the new contained value. + // + // Note: If an exception is thrown during the call to `VT`'s constructor, + // `*this` does not contain a value, and any previously contained value has + // been destroyed. The function shall not participate in overload resolution + // unless `is_copy_constructible_v` is `true` and + // `is_constructible_v&, Args...>` is `true`. + template< + typename T, + typename U, + typename... Args, + typename VT = absl::decay_t, + absl::enable_if_t::value && std::is_constructible&, Args...>::value>* = nullptr> + VT& emplace(std::initializer_list ilist, Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + reset(); // NOTE: reset() is required here even in the world of exceptions. + Obj* const object_ptr = + new Obj(in_place, ilist, std::forward(args)...); + obj_ = std::unique_ptr(object_ptr); + return object_ptr->value; + } + + // any::reset() + // + // Resets the state of the `absl::any` object, destroying the contained object + // if present. + void reset() noexcept + { + obj_ = nullptr; + } + + // any::swap() + // + // Swaps the passed value and the value of this `absl::any` object. + void swap(any& other) noexcept + { + obj_.swap(other.obj_); + } + + // Observers + + // any::has_value() + // + // Returns `true` if the `any` object has a contained value, otherwise + // returns `false`. 
+ bool has_value() const noexcept + { + return obj_ != nullptr; + } + +#ifdef ABSL_INTERNAL_HAS_RTTI + // Returns: typeid(T) if *this has a contained object of type T, otherwise + // typeid(void). + const std::type_info& type() const noexcept + { + if (has_value()) + { + return obj_->Type(); + } + + return typeid(void); + } +#endif // ABSL_INTERNAL_HAS_RTTI + + private: + // Tagged type-erased abstraction for holding a cloneable object. + class ObjInterface + { + public: + virtual ~ObjInterface() = default; + virtual std::unique_ptr Clone() const = 0; + virtual const void* ObjTypeId() const noexcept = 0; +#ifdef ABSL_INTERNAL_HAS_RTTI + virtual const std::type_info& Type() const noexcept = 0; +#endif // ABSL_INTERNAL_HAS_RTTI + }; + + // Hold a value of some queryable type, with an ability to Clone it. + template + class Obj : public ObjInterface + { + public: + template + explicit Obj(in_place_t /*tag*/, Args&&... args) : + value(std::forward(args)...) + { + } + + std::unique_ptr Clone() const final + { + return std::unique_ptr(new Obj(in_place, value)); + } + + const void* ObjTypeId() const noexcept final + { + return IdForType(); + } + +#ifdef ABSL_INTERNAL_HAS_RTTI + const std::type_info& Type() const noexcept final + { + return typeid(T); + } +#endif // ABSL_INTERNAL_HAS_RTTI + + T value; + }; + + std::unique_ptr CloneObj() const + { + if (!obj_) + return nullptr; + return obj_->Clone(); + } + + template + constexpr static const void* IdForType() + { + // Note: This type dance is to make the behavior consistent with typeid. + using NormalizedType = + typename std::remove_cv::type>::type; + + return base_internal::FastTypeId(); + } + + const void* GetObjTypeId() const + { + return obj_ ? obj_->ObjTypeId() : base_internal::FastTypeId(); + } + + // `absl::any` nonmember functions // + + // Description at the declaration site (top of file). + template + friend ValueType any_cast(const any& operand); + + // Description at the declaration site (top of file). 
+ template + friend ValueType any_cast(any& operand); // NOLINT(runtime/references) + + // Description at the declaration site (top of file). + template + friend const T* any_cast(const any* operand) noexcept; + + // Description at the declaration site (top of file). + template + friend T* any_cast(any* operand) noexcept; + + std::unique_ptr obj_; + }; + + // ----------------------------------------------------------------------------- + // Implementation Details + // ----------------------------------------------------------------------------- + + constexpr any::any() noexcept = default; + + template + struct any::IsInPlaceType : std::false_type + { + }; + + template + struct any::IsInPlaceType> : std::true_type + { + }; + + inline void swap(any& x, any& y) noexcept + { + x.swap(y); + } + + // Description at the declaration site (top of file). + template + any make_any(Args&&... args) + { + return any(in_place_type_t(), std::forward(args)...); + } + + // Description at the declaration site (top of file). + template + any make_any(std::initializer_list il, Args&&... args) + { + return any(in_place_type_t(), il, std::forward(args)...); + } + + // Description at the declaration site (top of file). + template + ValueType any_cast(const any& operand) + { + using U = typename std::remove_cv< + typename std::remove_reference::type>::type; + static_assert(std::is_constructible::value, "Invalid ValueType"); + auto* const result = (any_cast)(&operand); + if (result == nullptr) + { + any_internal::ThrowBadAnyCast(); + } + return static_cast(*result); + } + + // Description at the declaration site (top of file). 
+ template + ValueType any_cast(any& operand) + { // NOLINT(runtime/references) + using U = typename std::remove_cv< + typename std::remove_reference::type>::type; + static_assert(std::is_constructible::value, "Invalid ValueType"); + auto* result = (any_cast)(&operand); + if (result == nullptr) + { + any_internal::ThrowBadAnyCast(); + } + return static_cast(*result); + } + + // Description at the declaration site (top of file). + template + ValueType any_cast(any&& operand) + { + using U = typename std::remove_cv< + typename std::remove_reference::type>::type; + static_assert(std::is_constructible::value, "Invalid ValueType"); + return static_cast(std::move((any_cast)(operand))); + } + + // Description at the declaration site (top of file). + template + const T* any_cast(const any* operand) noexcept + { + using U = + typename std::remove_cv::type>::type; + return operand && operand->GetObjTypeId() == any::IdForType() ? std::addressof( + static_cast*>(operand->obj_.get())->value + ) : + nullptr; + } + + // Description at the declaration site (top of file). + template + T* any_cast(any* operand) noexcept + { + using U = + typename std::remove_cv::type>::type; + return operand && operand->GetObjTypeId() == any::IdForType() ? std::addressof( + static_cast*>(operand->obj_.get())->value + ) : + nullptr; + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USES_STD_ANY + +#endif // ABSL_TYPES_ANY_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/bad_any_cast.h b/CAPI/cpp/grpc/include/absl/types/bad_any_cast.h new file mode 100644 index 00000000..5aba45b9 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/bad_any_cast.h @@ -0,0 +1,79 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// bad_any_cast.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::bad_any_cast` type. + +#ifndef ABSL_TYPES_BAD_ANY_CAST_H_ +#define ABSL_TYPES_BAD_ANY_CAST_H_ + +#include + +#include "absl/base/config.h" + +#ifdef ABSL_USES_STD_ANY + +#include + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + using std::bad_any_cast; + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_USES_STD_ANY + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // bad_any_cast + // ----------------------------------------------------------------------------- + // + // An `absl::bad_any_cast` type is an exception type that is thrown when + // failing to successfully cast the return value of an `absl::any` object. 
+ // + // Example: + // + // auto a = absl::any(65); + // absl::any_cast(a); // 65 + // try { + // absl::any_cast(a); + // } catch(const absl::bad_any_cast& e) { + // std::cout << "Bad any cast: " << e.what() << '\n'; + // } + class bad_any_cast : public std::bad_cast + { + public: + ~bad_any_cast() override; + const char* what() const noexcept override; + }; + + namespace any_internal + { + + [[noreturn]] void ThrowBadAnyCast(); + + } // namespace any_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USES_STD_ANY + +#endif // ABSL_TYPES_BAD_ANY_CAST_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/bad_optional_access.h b/CAPI/cpp/grpc/include/absl/types/bad_optional_access.h new file mode 100644 index 00000000..e841eed9 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/bad_optional_access.h @@ -0,0 +1,82 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// bad_optional_access.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::bad_optional_access` type. 
+ +#ifndef ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_ +#define ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_ + +#include + +#include "absl/base/config.h" + +#ifdef ABSL_USES_STD_OPTIONAL + +#include + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + using std::bad_optional_access; + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_USES_STD_OPTIONAL + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // bad_optional_access + // ----------------------------------------------------------------------------- + // + // An `absl::bad_optional_access` type is an exception type that is thrown when + // attempting to access an `absl::optional` object that does not contain a + // value. + // + // Example: + // + // absl::optional o; + // + // try { + // int n = o.value(); + // } catch(const absl::bad_optional_access& e) { + // std::cout << "Bad optional access: " << e.what() << '\n'; + // } + class bad_optional_access : public std::exception + { + public: + bad_optional_access() = default; + ~bad_optional_access() override; + const char* what() const noexcept override; + }; + + namespace optional_internal + { + + // throw delegator + [[noreturn]] ABSL_DLL void throw_bad_optional_access(); + + } // namespace optional_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USES_STD_OPTIONAL + +#endif // ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/bad_variant_access.h b/CAPI/cpp/grpc/include/absl/types/bad_variant_access.h new file mode 100644 index 00000000..12c7ebfe --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/bad_variant_access.h @@ -0,0 +1,86 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// bad_variant_access.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::bad_variant_access` type. + +#ifndef ABSL_TYPES_BAD_VARIANT_ACCESS_H_ +#define ABSL_TYPES_BAD_VARIANT_ACCESS_H_ + +#include + +#include "absl/base/config.h" + +#ifdef ABSL_USES_STD_VARIANT + +#include + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + using std::bad_variant_access; + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_USES_STD_VARIANT + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // bad_variant_access + // ----------------------------------------------------------------------------- + // + // An `absl::bad_variant_access` type is an exception type that is thrown in + // the following cases: + // + // * Calling `absl::get(absl::variant) with an index or type that does not + // match the currently selected alternative type + // * Calling `absl::visit on an `absl::variant` that is in the + // `variant::valueless_by_exception` state. 
+ // + // Example: + // + // absl::variant v; + // v = 1; + // try { + // absl::get(v); + // } catch(const absl::bad_variant_access& e) { + // std::cout << "Bad variant access: " << e.what() << '\n'; + // } + class bad_variant_access : public std::exception + { + public: + bad_variant_access() noexcept = default; + ~bad_variant_access() override; + const char* what() const noexcept override; + }; + + namespace variant_internal + { + + [[noreturn]] ABSL_DLL void ThrowBadVariantAccess(); + [[noreturn]] ABSL_DLL void Rethrow(); + + } // namespace variant_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USES_STD_VARIANT + +#endif // ABSL_TYPES_BAD_VARIANT_ACCESS_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/compare.h b/CAPI/cpp/grpc/include/absl/types/compare.h new file mode 100644 index 00000000..f8cfe984 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/compare.h @@ -0,0 +1,689 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// compare.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::weak_equality`, `absl::strong_equality`, +// `absl::partial_ordering`, `absl::weak_ordering`, and `absl::strong_ordering` +// types for storing the results of three way comparisons. 
+// +// Example: +// absl::weak_ordering compare(const std::string& a, const std::string& b); +// +// These are C++11 compatible versions of the C++20 corresponding types +// (`std::weak_equality`, etc.) and are designed to be drop-in replacements +// for code compliant with C++20. + +#ifndef ABSL_TYPES_COMPARE_H_ +#define ABSL_TYPES_COMPARE_H_ + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace compare_internal + { + + using value_type = int8_t; + + class OnlyLiteralZero + { + public: +#if ABSL_HAVE_ATTRIBUTE(enable_if) + // On clang, we can avoid triggering modernize-use-nullptr by only enabling + // this overload when the value is a compile time integer constant equal to 0. + // + // In c++20, this could be a static_assert in a consteval function. + constexpr OnlyLiteralZero(int n) // NOLINT + __attribute__((enable_if(n == 0, "Only literal `0` is allowed."))) + { + } +#else // ABSL_HAVE_ATTRIBUTE(enable_if) + // Accept only literal zero since it can be implicitly converted to a pointer + // to member type. nullptr constants will be caught by the other constructor + // which accepts a nullptr_t. + // + // This constructor is not used for clang since it triggers + // modernize-use-nullptr. + constexpr OnlyLiteralZero(int OnlyLiteralZero::*) noexcept + { + } // NOLINT +#endif + + // Fails compilation when `nullptr` or integral type arguments other than + // `int` are passed. This constructor doesn't accept `int` because literal `0` + // has type `int`. Literal `0` arguments will be implicitly converted to + // `std::nullptr_t` and accepted by the above constructor, while other `int` + // arguments will fail to be converted and cause compilation failure. 
+ template::value || (std::is_integral::value && !std::is_same::value)>::type> + OnlyLiteralZero(T) + { // NOLINT + static_assert(sizeof(T) < 0, "Only literal `0` is allowed."); + } + }; + + enum class eq : value_type + { + equal = 0, + equivalent = equal, + nonequal = 1, + nonequivalent = nonequal, + }; + + enum class ord : value_type + { + less = -1, + greater = 1 + }; + + enum class ncmp : value_type + { + unordered = -127 + }; + +// Define macros to allow for creation or emulation of C++17 inline variables +// based on whether the feature is supported. Note: we can't use +// ABSL_INTERNAL_INLINE_CONSTEXPR here because the variables here are of +// incomplete types so they need to be defined after the types are complete. +#ifdef __cpp_inline_variables + +// A no-op expansion that can be followed by a semicolon at class level. +#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) static_assert(true, "") + +#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) \ + static const type name + +#define ABSL_COMPARE_INLINE_INIT(type, name, init) \ + inline constexpr type type::name(init) + +#else // __cpp_inline_variables + +#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) \ + ABSL_CONST_INIT static const T name + +// A no-op expansion that can be followed by a semicolon at class level. +#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) static_assert(true, "") + +#define ABSL_COMPARE_INLINE_INIT(type, name, init) \ + template \ + const T compare_internal::type##_base::name(init) + +#endif // __cpp_inline_variables + + // These template base classes allow for defining the values of the constants + // in the header file (for performance) without using inline variables (which + // aren't available in C++11). 
+ template + struct weak_equality_base + { + ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent); + ABSL_COMPARE_INLINE_BASECLASS_DECL(nonequivalent); + }; + + template + struct strong_equality_base + { + ABSL_COMPARE_INLINE_BASECLASS_DECL(equal); + ABSL_COMPARE_INLINE_BASECLASS_DECL(nonequal); + ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent); + ABSL_COMPARE_INLINE_BASECLASS_DECL(nonequivalent); + }; + + template + struct partial_ordering_base + { + ABSL_COMPARE_INLINE_BASECLASS_DECL(less); + ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent); + ABSL_COMPARE_INLINE_BASECLASS_DECL(greater); + ABSL_COMPARE_INLINE_BASECLASS_DECL(unordered); + }; + + template + struct weak_ordering_base + { + ABSL_COMPARE_INLINE_BASECLASS_DECL(less); + ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent); + ABSL_COMPARE_INLINE_BASECLASS_DECL(greater); + }; + + template + struct strong_ordering_base + { + ABSL_COMPARE_INLINE_BASECLASS_DECL(less); + ABSL_COMPARE_INLINE_BASECLASS_DECL(equal); + ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent); + ABSL_COMPARE_INLINE_BASECLASS_DECL(greater); + }; + + } // namespace compare_internal + + class weak_equality : public compare_internal::weak_equality_base + { + explicit constexpr weak_equality(compare_internal::eq v) noexcept + : + value_(static_cast(v)) + { + } + friend struct compare_internal::weak_equality_base; + + public: + ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, equivalent); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, nonequivalent); + + // Comparisons + friend constexpr bool operator==( + weak_equality v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ == 0; + } + friend constexpr bool operator!=( + weak_equality v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ != 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, weak_equality v) noexcept + { + return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, weak_equality v) noexcept + { + 
return 0 != v.value_; + } + friend constexpr bool operator==(weak_equality v1, weak_equality v2) noexcept + { + return v1.value_ == v2.value_; + } + friend constexpr bool operator!=(weak_equality v1, weak_equality v2) noexcept + { + return v1.value_ != v2.value_; + } + + private: + compare_internal::value_type value_; + }; + ABSL_COMPARE_INLINE_INIT(weak_equality, equivalent, compare_internal::eq::equivalent); + ABSL_COMPARE_INLINE_INIT(weak_equality, nonequivalent, compare_internal::eq::nonequivalent); + + class strong_equality : public compare_internal::strong_equality_base + { + explicit constexpr strong_equality(compare_internal::eq v) noexcept + : + value_(static_cast(v)) + { + } + friend struct compare_internal::strong_equality_base; + + public: + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equal); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequal); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equivalent); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequivalent); + + // Conversion + constexpr operator weak_equality() const noexcept + { // NOLINT + return value_ == 0 ? 
weak_equality::equivalent : weak_equality::nonequivalent; + } + // Comparisons + friend constexpr bool operator==( + strong_equality v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ == 0; + } + friend constexpr bool operator!=( + strong_equality v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ != 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, strong_equality v) noexcept + { + return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, strong_equality v) noexcept + { + return 0 != v.value_; + } + friend constexpr bool operator==(strong_equality v1, strong_equality v2) noexcept + { + return v1.value_ == v2.value_; + } + friend constexpr bool operator!=(strong_equality v1, strong_equality v2) noexcept + { + return v1.value_ != v2.value_; + } + + private: + compare_internal::value_type value_; + }; + ABSL_COMPARE_INLINE_INIT(strong_equality, equal, compare_internal::eq::equal); + ABSL_COMPARE_INLINE_INIT(strong_equality, nonequal, compare_internal::eq::nonequal); + ABSL_COMPARE_INLINE_INIT(strong_equality, equivalent, compare_internal::eq::equivalent); + ABSL_COMPARE_INLINE_INIT(strong_equality, nonequivalent, compare_internal::eq::nonequivalent); + + class partial_ordering : public compare_internal::partial_ordering_base + { + explicit constexpr partial_ordering(compare_internal::eq v) noexcept + : + value_(static_cast(v)) + { + } + explicit constexpr partial_ordering(compare_internal::ord v) noexcept + : + value_(static_cast(v)) + { + } + explicit constexpr partial_ordering(compare_internal::ncmp v) noexcept + : + value_(static_cast(v)) + { + } + friend struct compare_internal::partial_ordering_base; + + constexpr bool is_ordered() const noexcept + { + return value_ != + compare_internal::value_type(compare_internal::ncmp::unordered); + } + + public: + ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, less); + 
ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, equivalent); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, greater); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, unordered); + + // Conversion + constexpr operator weak_equality() const noexcept + { // NOLINT + return value_ == 0 ? weak_equality::equivalent : weak_equality::nonequivalent; + } + // Comparisons + friend constexpr bool operator==( + partial_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.is_ordered() && v.value_ == 0; + } + friend constexpr bool operator!=( + partial_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return !v.is_ordered() || v.value_ != 0; + } + friend constexpr bool operator<( + partial_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.is_ordered() && v.value_ < 0; + } + friend constexpr bool operator<=( + partial_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.is_ordered() && v.value_ <= 0; + } + friend constexpr bool operator>( + partial_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.is_ordered() && v.value_ > 0; + } + friend constexpr bool operator>=( + partial_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.is_ordered() && v.value_ >= 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept + { + return v.is_ordered() && 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept + { + return !v.is_ordered() || 0 != v.value_; + } + friend constexpr bool operator<(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept + { + return v.is_ordered() && 0 < v.value_; + } + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept + { + return v.is_ordered() && 0 <= v.value_; + } + friend constexpr bool operator>(compare_internal::OnlyLiteralZero, partial_ordering v) 
noexcept + { + return v.is_ordered() && 0 > v.value_; + } + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept + { + return v.is_ordered() && 0 >= v.value_; + } + friend constexpr bool operator==(partial_ordering v1, partial_ordering v2) noexcept + { + return v1.value_ == v2.value_; + } + friend constexpr bool operator!=(partial_ordering v1, partial_ordering v2) noexcept + { + return v1.value_ != v2.value_; + } + + private: + compare_internal::value_type value_; + }; + ABSL_COMPARE_INLINE_INIT(partial_ordering, less, compare_internal::ord::less); + ABSL_COMPARE_INLINE_INIT(partial_ordering, equivalent, compare_internal::eq::equivalent); + ABSL_COMPARE_INLINE_INIT(partial_ordering, greater, compare_internal::ord::greater); + ABSL_COMPARE_INLINE_INIT(partial_ordering, unordered, compare_internal::ncmp::unordered); + + class weak_ordering : public compare_internal::weak_ordering_base + { + explicit constexpr weak_ordering(compare_internal::eq v) noexcept + : + value_(static_cast(v)) + { + } + explicit constexpr weak_ordering(compare_internal::ord v) noexcept + : + value_(static_cast(v)) + { + } + friend struct compare_internal::weak_ordering_base; + + public: + ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, less); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, equivalent); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, greater); + + // Conversions + constexpr operator weak_equality() const noexcept + { // NOLINT + return value_ == 0 ? weak_equality::equivalent : weak_equality::nonequivalent; + } + constexpr operator partial_ordering() const noexcept + { // NOLINT + return value_ == 0 ? partial_ordering::equivalent : (value_ < 0 ? 
partial_ordering::less : partial_ordering::greater); + } + // Comparisons + friend constexpr bool operator==( + weak_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ == 0; + } + friend constexpr bool operator!=( + weak_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ != 0; + } + friend constexpr bool operator<( + weak_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ < 0; + } + friend constexpr bool operator<=( + weak_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ <= 0; + } + friend constexpr bool operator>( + weak_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ > 0; + } + friend constexpr bool operator>=( + weak_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ >= 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept + { + return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept + { + return 0 != v.value_; + } + friend constexpr bool operator<(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept + { + return 0 < v.value_; + } + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept + { + return 0 <= v.value_; + } + friend constexpr bool operator>(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept + { + return 0 > v.value_; + } + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept + { + return 0 >= v.value_; + } + friend constexpr bool operator==(weak_ordering v1, weak_ordering v2) noexcept + { + return v1.value_ == v2.value_; + } + friend constexpr bool operator!=(weak_ordering v1, weak_ordering v2) noexcept + { + return v1.value_ != v2.value_; + } + + private: + compare_internal::value_type value_; + }; + ABSL_COMPARE_INLINE_INIT(weak_ordering, less, 
compare_internal::ord::less); + ABSL_COMPARE_INLINE_INIT(weak_ordering, equivalent, compare_internal::eq::equivalent); + ABSL_COMPARE_INLINE_INIT(weak_ordering, greater, compare_internal::ord::greater); + + class strong_ordering : public compare_internal::strong_ordering_base + { + explicit constexpr strong_ordering(compare_internal::eq v) noexcept + : + value_(static_cast(v)) + { + } + explicit constexpr strong_ordering(compare_internal::ord v) noexcept + : + value_(static_cast(v)) + { + } + friend struct compare_internal::strong_ordering_base; + + public: + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, less); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equal); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equivalent); + ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, greater); + + // Conversions + constexpr operator weak_equality() const noexcept + { // NOLINT + return value_ == 0 ? weak_equality::equivalent : weak_equality::nonequivalent; + } + constexpr operator strong_equality() const noexcept + { // NOLINT + return value_ == 0 ? strong_equality::equal : strong_equality::nonequal; + } + constexpr operator partial_ordering() const noexcept + { // NOLINT + return value_ == 0 ? partial_ordering::equivalent : (value_ < 0 ? partial_ordering::less : partial_ordering::greater); + } + constexpr operator weak_ordering() const noexcept + { // NOLINT + return value_ == 0 ? weak_ordering::equivalent : (value_ < 0 ? 
weak_ordering::less : weak_ordering::greater); + } + // Comparisons + friend constexpr bool operator==( + strong_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ == 0; + } + friend constexpr bool operator!=( + strong_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ != 0; + } + friend constexpr bool operator<( + strong_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ < 0; + } + friend constexpr bool operator<=( + strong_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ <= 0; + } + friend constexpr bool operator>( + strong_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ > 0; + } + friend constexpr bool operator>=( + strong_ordering v, compare_internal::OnlyLiteralZero + ) noexcept + { + return v.value_ >= 0; + } + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept + { + return 0 == v.value_; + } + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept + { + return 0 != v.value_; + } + friend constexpr bool operator<(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept + { + return 0 < v.value_; + } + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept + { + return 0 <= v.value_; + } + friend constexpr bool operator>(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept + { + return 0 > v.value_; + } + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept + { + return 0 >= v.value_; + } + friend constexpr bool operator==(strong_ordering v1, strong_ordering v2) noexcept + { + return v1.value_ == v2.value_; + } + friend constexpr bool operator!=(strong_ordering v1, strong_ordering v2) noexcept + { + return v1.value_ != v2.value_; + } + + private: + compare_internal::value_type value_; + }; + 
ABSL_COMPARE_INLINE_INIT(strong_ordering, less, compare_internal::ord::less); + ABSL_COMPARE_INLINE_INIT(strong_ordering, equal, compare_internal::eq::equal); + ABSL_COMPARE_INLINE_INIT(strong_ordering, equivalent, compare_internal::eq::equivalent); + ABSL_COMPARE_INLINE_INIT(strong_ordering, greater, compare_internal::ord::greater); + +#undef ABSL_COMPARE_INLINE_BASECLASS_DECL +#undef ABSL_COMPARE_INLINE_SUBCLASS_DECL +#undef ABSL_COMPARE_INLINE_INIT + + namespace compare_internal + { + // We also provide these comparator adapter functions for internal absl use. + + // Helper functions to do a boolean comparison of two keys given a boolean + // or three-way comparator. + // SFINAE prevents implicit conversions to bool (such as from int). + template::value, int> = 0> + constexpr bool compare_result_as_less_than(const BoolT r) + { + return r; + } + constexpr bool compare_result_as_less_than(const absl::weak_ordering r) + { + return r < 0; + } + + template + constexpr bool do_less_than_comparison(const Compare& compare, const K& x, const LK& y) + { + return compare_result_as_less_than(compare(x, y)); + } + + // Helper functions to do a three-way comparison of two keys given a boolean or + // three-way comparator. + // SFINAE prevents implicit conversions to int (such as from bool). + template::value, int> = 0> + constexpr absl::weak_ordering compare_result_as_ordering(const Int c) + { + return c < 0 ? absl::weak_ordering::less : c == 0 ? 
absl::weak_ordering::equivalent : + absl::weak_ordering::greater; + } + constexpr absl::weak_ordering compare_result_as_ordering( + const absl::weak_ordering c + ) + { + return c; + } + + template< + typename Compare, + typename K, + typename LK, + absl::enable_if_t>::value, int> = 0> + constexpr absl::weak_ordering do_three_way_comparison(const Compare& compare, const K& x, const LK& y) + { + return compare_result_as_ordering(compare(x, y)); + } + template< + typename Compare, + typename K, + typename LK, + absl::enable_if_t>::value, int> = 0> + constexpr absl::weak_ordering do_three_way_comparison(const Compare& compare, const K& x, const LK& y) + { + return compare(x, y) ? absl::weak_ordering::less : compare(y, x) ? absl::weak_ordering::greater : + absl::weak_ordering::equivalent; + } + + } // namespace compare_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TYPES_COMPARE_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/conformance_aliases.h b/CAPI/cpp/grpc/include/absl/types/internal/conformance_aliases.h new file mode 100644 index 00000000..e246cbed --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/conformance_aliases.h @@ -0,0 +1,488 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// regularity_aliases.h +// ----------------------------------------------------------------------------- +// +// This file contains type aliases of common ConformanceProfiles and Archetypes +// so that they can be directly used by name without creating them from scratch. + +#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_ALIASES_H_ +#define ABSL_TYPES_INTERNAL_CONFORMANCE_ALIASES_H_ + +#include "absl/types/internal/conformance_archetype.h" +#include "absl/types/internal/conformance_profile.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace types_internal + { + +// Creates both a Profile and a corresponding Archetype with root name "name". +#define ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS(name, ...) \ + struct name##Profile : __VA_ARGS__ \ + { \ + }; \ + \ + using name##Archetype = ::absl::types_internal::Archetype; \ + \ + template \ + using name##Archetype##_ = ::absl::types_internal::Archetype< \ + ::absl::types_internal::StrongProfileTypedef> + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasTrivialDefaultConstructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowDefaultConstructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasDefaultConstructor, ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasTrivialMoveConstructor, ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowMoveConstructor, ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasMoveConstructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasTrivialCopyConstructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowCopyConstructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasCopyConstructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + 
HasTrivialMoveAssign, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowMoveAssign, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasMoveAssign, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasTrivialCopyAssign, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowCopyAssign, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasCopyAssign, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasTrivialDestructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowDestructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasDestructor, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowEquality, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasEquality, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowInequality, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasInequality, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowLessThan, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasLessThan, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowLessEqual, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasLessEqual, + ConformanceProfile + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowGreaterEqual, + ConformanceProfile< + default_constructible::maybe, + move_constructible::maybe, + copy_constructible::maybe, + move_assignable::maybe, + copy_assignable::maybe, + destructible::maybe, + equality_comparable::maybe, + inequality_comparable::maybe, + less_than_comparable::maybe, + less_equal_comparable::maybe, + greater_equal_comparable::nothrow> + ); + + 
ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasGreaterEqual, + ConformanceProfile< + default_constructible::maybe, + move_constructible::maybe, + copy_constructible::maybe, + move_assignable::maybe, + copy_assignable::maybe, + destructible::maybe, + equality_comparable::maybe, + inequality_comparable::maybe, + less_than_comparable::maybe, + less_equal_comparable::maybe, + greater_equal_comparable::yes> + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowGreaterThan, + ConformanceProfile< + default_constructible::maybe, + move_constructible::maybe, + copy_constructible::maybe, + move_assignable::maybe, + copy_assignable::maybe, + destructible::maybe, + equality_comparable::maybe, + inequality_comparable::maybe, + less_than_comparable::maybe, + less_equal_comparable::maybe, + greater_equal_comparable::maybe, + greater_than_comparable::nothrow> + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasGreaterThan, + ConformanceProfile< + default_constructible::maybe, + move_constructible::maybe, + copy_constructible::maybe, + move_assignable::maybe, + copy_assignable::maybe, + destructible::maybe, + equality_comparable::maybe, + inequality_comparable::maybe, + less_than_comparable::maybe, + less_equal_comparable::maybe, + greater_equal_comparable::maybe, + greater_than_comparable::yes> + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasNothrowSwap, + ConformanceProfile< + default_constructible::maybe, + move_constructible::maybe, + copy_constructible::maybe, + move_assignable::maybe, + copy_assignable::maybe, + destructible::maybe, + equality_comparable::maybe, + inequality_comparable::maybe, + less_than_comparable::maybe, + less_equal_comparable::maybe, + greater_equal_comparable::maybe, + greater_than_comparable::maybe, + swappable::nothrow> + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasSwap, + ConformanceProfile< + default_constructible::maybe, + move_constructible::maybe, + copy_constructible::maybe, + move_assignable::maybe, + 
copy_assignable::maybe, + destructible::maybe, + equality_comparable::maybe, + inequality_comparable::maybe, + less_than_comparable::maybe, + less_equal_comparable::maybe, + greater_equal_comparable::maybe, + greater_than_comparable::maybe, + swappable::yes> + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HasStdHashSpecialization, + ConformanceProfile< + default_constructible::maybe, + move_constructible::maybe, + copy_constructible::maybe, + move_assignable::maybe, + copy_assignable::maybe, + destructible::maybe, + equality_comparable::maybe, + inequality_comparable::maybe, + less_than_comparable::maybe, + less_equal_comparable::maybe, + greater_equal_comparable::maybe, + greater_than_comparable::maybe, + swappable::maybe, + hashable::yes> + ); + + //////////////////////////////////////////////////////////////////////////////// + //// The remaining aliases are combinations of the previous aliases. //// + //////////////////////////////////////////////////////////////////////////////// + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + Equatable, CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + Comparable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + NothrowEquatable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + NothrowComparable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + Value, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + EquatableValue, CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + ComparableValue, CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + DefaultConstructibleValue, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + NothrowMoveConstructible, CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + EquatableNothrowMoveConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + ComparableNothrowMoveConstructible, + 
CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + DefaultConstructibleNothrowMoveConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + CopyConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + EquatableCopyConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + ComparableCopyConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + DefaultConstructibleCopyConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + NothrowMovable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + EquatableNothrowMovable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + ComparableNothrowMovable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + DefaultConstructibleNothrowMovable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + TrivialSpecialMemberFunctions, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + TriviallyComplete, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HashableNothrowMoveConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HashableCopyConstructible, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HashableNothrowMovable, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + HashableValue, + CombineProfiles + ); + + ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS( + ComparableHashableValue, + CombineProfiles + ); + + // The "preferred" profiles that we support in Abseil. + template class Receiver> + using ExpandBasicProfiles = + Receiver; + + // The basic profiles except that they are also all Equatable. + template class Receiver> + using ExpandBasicEquatableProfiles = + Receiver; + + // The basic profiles except that they are also all Comparable. 
+ template class Receiver> + using ExpandBasicComparableProfiles = + Receiver; + + // The basic profiles except that they are also all Hashable. + template class Receiver> + using ExpandBasicHashableProfiles = + Receiver; + + // The basic profiles except that they are also all DefaultConstructible. + template class Receiver> + using ExpandBasicDefaultConstructibleProfiles = + Receiver; + + // The type profiles that we support in Abseil (all of the previous lists). + template class Receiver> + using ExpandSupportedProfiles = Receiver< + NothrowMoveConstructibleProfile, + CopyConstructibleProfile, + NothrowMovableProfile, + ValueProfile, + EquatableNothrowMoveConstructibleProfile, + EquatableCopyConstructibleProfile, + EquatableNothrowMovableProfile, + EquatableValueProfile, + ComparableNothrowMoveConstructibleProfile, + ComparableCopyConstructibleProfile, + ComparableNothrowMovableProfile, + ComparableValueProfile, + DefaultConstructibleNothrowMoveConstructibleProfile, + DefaultConstructibleCopyConstructibleProfile, + DefaultConstructibleNothrowMovableProfile, + DefaultConstructibleValueProfile, + HashableNothrowMoveConstructibleProfile, + HashableCopyConstructibleProfile, + HashableNothrowMovableProfile, + HashableValueProfile>; + + // TODO(calabrese) Include types that have throwing move constructors, since in + // practice we still need to support them because of standard library types with + // (potentially) non-noexcept moves. + + } // namespace types_internal + ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_PROFILE_AND_ARCHETYPE_ALIAS + +#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_ALIASES_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/conformance_archetype.h b/CAPI/cpp/grpc/include/absl/types/internal/conformance_archetype.h new file mode 100644 index 00000000..45b77c07 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/conformance_archetype.h @@ -0,0 +1,873 @@ +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// conformance_archetype.h +// ----------------------------------------------------------------------------- +// +// This file contains a facility for generating "archetypes" of out of +// "Conformance Profiles" (see "conformance_profiles.h" for more information +// about Conformance Profiles). An archetype is a type that aims to support the +// bare minimum requirements of a given Conformance Profile. For instance, an +// archetype that corresponds to an ImmutableProfile has exactly a nothrow +// move-constructor, a potentially-throwing copy constructor, a nothrow +// destructor, with all other special-member-functions deleted. These archetypes +// are useful for testing to make sure that templates are able to work with the +// kinds of types that they claim to support (i.e. that they do not accidentally +// under-constrain), +// +// The main type template in this file is the Archetype template, which takes +// a Conformance Profile as a template argument and its instantiations are a +// minimum-conforming model of that profile. 
+ +#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_ARCHETYPE_H_ +#define ABSL_TYPES_INTERNAL_CONFORMANCE_ARCHETYPE_H_ + +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/types/internal/conformance_profile.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace types_internal + { + + // A minimum-conforming implementation of a type with properties specified in + // `Prof`, where `Prof` is a valid Conformance Profile. + template + class Archetype; + + // Given an Archetype, obtain the properties of the profile associated with that + // archetype. + template + struct PropertiesOfArchetype; + + template + struct PropertiesOfArchetype> + { + using type = PropertiesOfT; + }; + + template + using PropertiesOfArchetypeT = typename PropertiesOfArchetype::type; + + // A metafunction to determine if a type is an `Archetype`. + template + struct IsArchetype : std::false_type + { + }; + + template + struct IsArchetype> : std::true_type + { + }; + + // A constructor tag type used when creating an Archetype with internal state. + struct MakeArchetypeState + { + }; + + // Data stored within an archetype that is copied/compared/hashed when the + // corresponding operations are used. + using ArchetypeState = std::size_t; + + //////////////////////////////////////////////////////////////////////////////// + // This section of the file defines a chain of base classes for Archetype, // + // where each base defines a specific special member function with the // + // appropriate properties (deleted, noexcept(false), noexcept, or trivial). // + //////////////////////////////////////////////////////////////////////////////// + + // The bottom-most base, which contains the state and the default constructor. 
+ template + struct ArchetypeStateBase + { + static_assert(DefaultConstructibleValue == default_constructible::yes || DefaultConstructibleValue == default_constructible::nothrow, ""); + + ArchetypeStateBase() noexcept( + DefaultConstructibleValue == + default_constructible:: + nothrow + ) /*Vacuous archetype_state initialization*/ + { + } + explicit ArchetypeStateBase(MakeArchetypeState, ArchetypeState state) noexcept + : + archetype_state(state) + { + } + + ArchetypeState archetype_state; + }; + + template<> + struct ArchetypeStateBase + { + explicit ArchetypeStateBase() = delete; + explicit ArchetypeStateBase(MakeArchetypeState, ArchetypeState state) noexcept + : + archetype_state(state) + { + } + + ArchetypeState archetype_state; + }; + + template<> + struct ArchetypeStateBase + { + ArchetypeStateBase() = default; + explicit ArchetypeStateBase(MakeArchetypeState, ArchetypeState state) noexcept + : + archetype_state(state) + { + } + + ArchetypeState archetype_state; + }; + + // The move-constructor base + template + struct ArchetypeMoveConstructor : ArchetypeStateBase + { + static_assert(MoveConstructibleValue == move_constructible::yes || MoveConstructibleValue == move_constructible::nothrow, ""); + + explicit ArchetypeMoveConstructor(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeStateBase(MakeArchetypeState(), state) + { + } + + ArchetypeMoveConstructor() = default; + ArchetypeMoveConstructor(ArchetypeMoveConstructor&& other) noexcept( + MoveConstructibleValue == move_constructible::nothrow + ) : + ArchetypeStateBase(MakeArchetypeState(), other.archetype_state) + { + } + ArchetypeMoveConstructor(const ArchetypeMoveConstructor&) = default; + ArchetypeMoveConstructor& operator=(ArchetypeMoveConstructor&&) = default; + ArchetypeMoveConstructor& operator=(const ArchetypeMoveConstructor&) = + default; + }; + + template + struct ArchetypeMoveConstructor : ArchetypeStateBase + { + explicit ArchetypeMoveConstructor(MakeArchetypeState, ArchetypeState 
state) noexcept + : + ArchetypeStateBase(MakeArchetypeState(), state) + { + } + + ArchetypeMoveConstructor() = default; + }; + + // The copy-constructor base + template + struct ArchetypeCopyConstructor : ArchetypeMoveConstructor + { + static_assert(CopyConstructibleValue == copy_constructible::yes || CopyConstructibleValue == copy_constructible::nothrow, ""); + explicit ArchetypeCopyConstructor(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeMoveConstructor(MakeArchetypeState(), state) + { + } + + ArchetypeCopyConstructor() = default; + ArchetypeCopyConstructor(ArchetypeCopyConstructor&&) = default; + ArchetypeCopyConstructor(const ArchetypeCopyConstructor& other) noexcept( + CopyConstructibleValue == copy_constructible::nothrow + ) : + ArchetypeMoveConstructor( + MakeArchetypeState(), other.archetype_state + ) + { + } + ArchetypeCopyConstructor& operator=(ArchetypeCopyConstructor&&) = default; + ArchetypeCopyConstructor& operator=(const ArchetypeCopyConstructor&) = + default; + }; + + template + struct ArchetypeCopyConstructor : ArchetypeMoveConstructor + { + explicit ArchetypeCopyConstructor(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeMoveConstructor(MakeArchetypeState(), state) + { + } + + ArchetypeCopyConstructor() = default; + ArchetypeCopyConstructor(ArchetypeCopyConstructor&&) = default; + ArchetypeCopyConstructor(const ArchetypeCopyConstructor&) = delete; + ArchetypeCopyConstructor& operator=(ArchetypeCopyConstructor&&) = default; + ArchetypeCopyConstructor& operator=(const ArchetypeCopyConstructor&) = + default; + }; + + template + struct ArchetypeCopyConstructor : ArchetypeMoveConstructor + { + explicit ArchetypeCopyConstructor(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeMoveConstructor(MakeArchetypeState(), state) + { + } + + ArchetypeCopyConstructor() = default; + }; + + // The move-assign base + template + struct ArchetypeMoveAssign : ArchetypeCopyConstructor + { + 
static_assert(MoveAssignableValue == move_assignable::yes || MoveAssignableValue == move_assignable::nothrow, ""); + explicit ArchetypeMoveAssign(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeCopyConstructor(MakeArchetypeState(), state) + { + } + + ArchetypeMoveAssign() = default; + ArchetypeMoveAssign(ArchetypeMoveAssign&&) = default; + ArchetypeMoveAssign(const ArchetypeMoveAssign&) = default; + ArchetypeMoveAssign& operator=(ArchetypeMoveAssign&& other) noexcept( + MoveAssignableValue == move_assignable::nothrow + ) + { + this->archetype_state = other.archetype_state; + return *this; + } + + ArchetypeMoveAssign& operator=(const ArchetypeMoveAssign&) = default; + }; + + template + struct ArchetypeMoveAssign : ArchetypeCopyConstructor + { + explicit ArchetypeMoveAssign(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeCopyConstructor(MakeArchetypeState(), state) + { + } + + ArchetypeMoveAssign() = default; + }; + + // The copy-assign base + template + struct ArchetypeCopyAssign : ArchetypeMoveAssign + { + static_assert(CopyAssignableValue == copy_assignable::yes || CopyAssignableValue == copy_assignable::nothrow, ""); + explicit ArchetypeCopyAssign(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeMoveAssign( + MakeArchetypeState(), state + ) + { + } + + ArchetypeCopyAssign() = default; + ArchetypeCopyAssign(ArchetypeCopyAssign&&) = default; + ArchetypeCopyAssign(const ArchetypeCopyAssign&) = default; + ArchetypeCopyAssign& operator=(ArchetypeCopyAssign&&) = default; + + ArchetypeCopyAssign& operator=(const ArchetypeCopyAssign& other) noexcept( + CopyAssignableValue == copy_assignable::nothrow + ) + { + this->archetype_state = other.archetype_state; + return *this; + } + }; + + template + struct ArchetypeCopyAssign : ArchetypeMoveAssign + { + explicit ArchetypeCopyAssign(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeMoveAssign( + MakeArchetypeState(), state + ) + { + } + + 
ArchetypeCopyAssign() = default; + ArchetypeCopyAssign(ArchetypeCopyAssign&&) = default; + ArchetypeCopyAssign(const ArchetypeCopyAssign&) = default; + ArchetypeCopyAssign& operator=(ArchetypeCopyAssign&&) = default; + ArchetypeCopyAssign& operator=(const ArchetypeCopyAssign&) = delete; + }; + + template + struct ArchetypeCopyAssign : ArchetypeMoveAssign + { + explicit ArchetypeCopyAssign(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeMoveAssign( + MakeArchetypeState(), state + ) + { + } + + ArchetypeCopyAssign() = default; + }; + + // The destructor base + template + struct ArchetypeDestructor : ArchetypeCopyAssign + { + static_assert(DestructibleValue == destructible::yes || DestructibleValue == destructible::nothrow, ""); + + explicit ArchetypeDestructor(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeCopyAssign(MakeArchetypeState(), state) + { + } + + ArchetypeDestructor() = default; + ArchetypeDestructor(ArchetypeDestructor&&) = default; + ArchetypeDestructor(const ArchetypeDestructor&) = default; + ArchetypeDestructor& operator=(ArchetypeDestructor&&) = default; + ArchetypeDestructor& operator=(const ArchetypeDestructor&) = default; + ~ArchetypeDestructor() noexcept(DestructibleValue == destructible::nothrow) + { + } + }; + + template + struct ArchetypeDestructor : ArchetypeCopyAssign + { + explicit ArchetypeDestructor(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeCopyAssign(MakeArchetypeState(), state) + { + } + + ArchetypeDestructor() = default; + }; + + // An alias to the top of the chain of bases for special-member functions. + // NOTE: move_constructible::maybe, move_assignable::maybe, and + // destructible::maybe are handled in the top-level type by way of SFINAE. + // Because of this, we never instantiate the base classes with + // move_constructible::maybe, move_assignable::maybe, or destructible::maybe so + // that we minimize the number of different possible type-template + // instantiations. 
+ template + using ArchetypeSpecialMembersBase = ArchetypeDestructor< + DefaultConstructibleValue, + MoveConstructibleValue != move_constructible::maybe ? MoveConstructibleValue : move_constructible::nothrow, + CopyConstructibleValue, + MoveAssignableValue != move_assignable::maybe ? MoveAssignableValue : move_assignable::nothrow, + CopyAssignableValue, + DestructibleValue != destructible::maybe ? DestructibleValue : destructible::nothrow>; + + // A function that is used to create an archetype with some associated state. + template + Arch MakeArchetype(ArchetypeState state) noexcept + { + static_assert(IsArchetype::value, "The explicit template argument to MakeArchetype is required " + "to be an Archetype."); + return Arch(MakeArchetypeState(), state); + } + + // This is used to conditionally delete "copy" and "move" constructors in a way + // that is consistent with what the ConformanceProfile requires and that also + // strictly enforces the arguments to the copy/move to not come from implicit + // conversions when dealing with the Archetype. + template + constexpr bool ShouldDeleteConstructor() + { + return !((PropertiesOfT::move_constructible_support != move_constructible::maybe && std::is_same>::value) || (PropertiesOfT::copy_constructible_support != copy_constructible::maybe && (std::is_same&>::value || std::is_same&>::value || std::is_same>::value))); + } + + // This is used to conditionally delete "copy" and "move" assigns in a way + // that is consistent with what the ConformanceProfile requires and that also + // strictly enforces the arguments to the copy/move to not come from implicit + // conversions when dealing with the Archetype. 
+ template + constexpr bool ShouldDeleteAssign() + { + return !( + (PropertiesOfT::move_assignable_support != move_assignable::maybe && + std::is_same>::value) || + (PropertiesOfT::copy_assignable_support != copy_assignable::maybe && + (std::is_same&>::value || + std::is_same&>::value || + std::is_same>::value)) + ); + } + + // TODO(calabrese) Inherit from a chain of secondary bases to pull in the + // associated functions of other concepts. + template + class Archetype : ArchetypeSpecialMembersBase::default_constructible_support, PropertiesOfT::move_constructible_support, PropertiesOfT::copy_constructible_support, PropertiesOfT::move_assignable_support, PropertiesOfT::copy_assignable_support, PropertiesOfT::destructible_support> + { + static_assert(std::is_same::value, "An explicit type must not be passed as the second template " + "argument to 'Archetype`."); + + // The cases mentioned in these static_asserts are expected to be handled in + // the partial template specializations of Archetype that follow this + // definition. + static_assert(PropertiesOfT::destructible_support != destructible::maybe, ""); + static_assert(PropertiesOfT::move_constructible_support != move_constructible::maybe || PropertiesOfT::copy_constructible_support == copy_constructible::maybe, ""); + static_assert(PropertiesOfT::move_assignable_support != move_assignable::maybe || PropertiesOfT::copy_assignable_support == copy_assignable::maybe, ""); + + public: + Archetype() = default; + + // Disallow moves when requested, and disallow implicit conversions. + template()>::type* = nullptr> + Archetype(T&&) = delete; + + // Disallow moves when requested, and disallow implicit conversions. 
+ template()>::type* = nullptr> + Archetype& operator=(T&&) = delete; + + using ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>::archetype_state; + + private: + explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>(MakeArchetypeState(), state) + { + } + + friend Archetype MakeArchetype(ArchetypeState) noexcept; + }; + + template + class Archetype::move_constructible_support != move_constructible::maybe && PropertiesOfT::move_assignable_support == move_assignable::maybe && PropertiesOfT::destructible_support != destructible::maybe>::type> : ArchetypeSpecialMembersBase::default_constructible_support, PropertiesOfT::move_constructible_support, PropertiesOfT::copy_constructible_support, PropertiesOfT::move_assignable_support, PropertiesOfT::copy_assignable_support, PropertiesOfT::destructible_support> + { + public: + Archetype() = default; + Archetype(Archetype&&) = default; + Archetype(const Archetype&) = default; + Archetype& operator=(Archetype&&) = delete; + Archetype& operator=(const Archetype&) = default; + + // Disallow moves when requested, and disallow implicit conversions. + template()>::type* = nullptr> + Archetype(T&&) = delete; + + // Disallow moves when requested, and disallow implicit conversions. 
+ template()>::type* = nullptr> + Archetype& operator=(T&&) = delete; + + using ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>::archetype_state; + + private: + explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>(MakeArchetypeState(), state) + { + } + + friend Archetype MakeArchetype(ArchetypeState) noexcept; + }; + + template + class Archetype::move_constructible_support == move_constructible::maybe && PropertiesOfT::move_assignable_support == move_assignable::maybe && PropertiesOfT::destructible_support != destructible::maybe>::type> : ArchetypeSpecialMembersBase::default_constructible_support, PropertiesOfT::move_constructible_support, PropertiesOfT::copy_constructible_support, PropertiesOfT::move_assignable_support, PropertiesOfT::copy_assignable_support, PropertiesOfT::destructible_support> + { + public: + Archetype() = default; + Archetype(Archetype&&) = delete; + Archetype(const Archetype&) = default; + Archetype& operator=(Archetype&&) = delete; + Archetype& operator=(const Archetype&) = default; + + // Disallow moves when requested, and disallow implicit conversions. + template()>::type* = nullptr> + Archetype(T&&) = delete; + + // Disallow moves when requested, and disallow implicit conversions. 
+ template()>::type* = nullptr> + Archetype& operator=(T&&) = delete; + + using ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>::archetype_state; + + private: + explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>(MakeArchetypeState(), state) + { + } + + friend Archetype MakeArchetype(ArchetypeState) noexcept; + }; + + template + class Archetype::move_constructible_support == move_constructible::maybe && PropertiesOfT::move_assignable_support != move_assignable::maybe && PropertiesOfT::destructible_support != destructible::maybe>::type> : ArchetypeSpecialMembersBase::default_constructible_support, PropertiesOfT::move_constructible_support, PropertiesOfT::copy_constructible_support, PropertiesOfT::move_assignable_support, PropertiesOfT::copy_assignable_support, PropertiesOfT::destructible_support> + { + public: + Archetype() = default; + Archetype(Archetype&&) = delete; + Archetype(const Archetype&) = default; + Archetype& operator=(Archetype&&) = default; + Archetype& operator=(const Archetype&) = default; + + // Disallow moves when requested, and disallow implicit conversions. + template()>::type* = nullptr> + Archetype(T&&) = delete; + + // Disallow moves when requested, and disallow implicit conversions. 
+ template()>::type* = nullptr> + Archetype& operator=(T&&) = delete; + + using ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>::archetype_state; + + private: + explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>(MakeArchetypeState(), state) + { + } + + friend Archetype MakeArchetype(ArchetypeState) noexcept; + }; + + template + class Archetype::move_constructible_support != move_constructible::maybe && PropertiesOfT::move_assignable_support == move_assignable::maybe && PropertiesOfT::destructible_support == destructible::maybe>::type> : ArchetypeSpecialMembersBase::default_constructible_support, PropertiesOfT::move_constructible_support, PropertiesOfT::copy_constructible_support, PropertiesOfT::move_assignable_support, PropertiesOfT::copy_assignable_support, PropertiesOfT::destructible_support> + { + public: + Archetype() = default; + Archetype(Archetype&&) = default; + Archetype(const Archetype&) = default; + Archetype& operator=(Archetype&&) = delete; + Archetype& operator=(const Archetype&) = default; + ~Archetype() = delete; + + // Disallow moves when requested, and disallow implicit conversions. + template()>::type* = nullptr> + Archetype(T&&) = delete; + + // Disallow moves when requested, and disallow implicit conversions. 
+ template()>::type* = nullptr> + Archetype& operator=(T&&) = delete; + + using ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>::archetype_state; + + private: + explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>(MakeArchetypeState(), state) + { + } + + friend Archetype MakeArchetype(ArchetypeState) noexcept; + }; + + template + class Archetype::move_constructible_support == move_constructible::maybe && PropertiesOfT::move_assignable_support == move_assignable::maybe && PropertiesOfT::destructible_support == destructible::maybe>::type> : ArchetypeSpecialMembersBase::default_constructible_support, PropertiesOfT::move_constructible_support, PropertiesOfT::copy_constructible_support, PropertiesOfT::move_assignable_support, PropertiesOfT::copy_assignable_support, PropertiesOfT::destructible_support> + { + public: + Archetype() = default; + Archetype(Archetype&&) = delete; + Archetype(const Archetype&) = default; + Archetype& operator=(Archetype&&) = delete; + Archetype& operator=(const Archetype&) = default; + ~Archetype() = delete; + + // Disallow moves when requested, and disallow implicit conversions. + template()>::type* = nullptr> + Archetype(T&&) = delete; + + // Disallow moves when requested, and disallow implicit conversions. 
+ template()>::type* = nullptr> + Archetype& operator=(T&&) = delete; + + using ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>::archetype_state; + + private: + explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>(MakeArchetypeState(), state) + { + } + + friend Archetype MakeArchetype(ArchetypeState) noexcept; + }; + + template + class Archetype::move_constructible_support == move_constructible::maybe && PropertiesOfT::move_assignable_support != move_assignable::maybe && PropertiesOfT::destructible_support == destructible::maybe>::type> : ArchetypeSpecialMembersBase::default_constructible_support, PropertiesOfT::move_constructible_support, PropertiesOfT::copy_constructible_support, PropertiesOfT::move_assignable_support, PropertiesOfT::copy_assignable_support, PropertiesOfT::destructible_support> + { + public: + Archetype() = default; + Archetype(Archetype&&) = delete; + Archetype(const Archetype&) = default; + Archetype& operator=(Archetype&&) = default; + Archetype& operator=(const Archetype&) = default; + ~Archetype() = delete; + + // Disallow moves when requested, and disallow implicit conversions. + template()>::type* = nullptr> + Archetype(T&&) = delete; + + // Disallow moves when requested, and disallow implicit conversions. 
+ template()>::type* = nullptr> + Archetype& operator=(T&&) = delete; + + using ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>::archetype_state; + + private: + explicit Archetype(MakeArchetypeState, ArchetypeState state) noexcept + : + ArchetypeSpecialMembersBase< + PropertiesOfT::default_constructible_support, + PropertiesOfT::move_constructible_support, + PropertiesOfT::copy_constructible_support, + PropertiesOfT::move_assignable_support, + PropertiesOfT::copy_assignable_support, + PropertiesOfT::destructible_support>(MakeArchetypeState(), state) + { + } + + friend Archetype MakeArchetype(ArchetypeState) noexcept; + }; + + // Explicitly deleted swap for Archetype if the profile does not require swap. + // It is important to delete it rather than simply leave it out so that the + // "using std::swap;" idiom will result in this deleted overload being picked. + template::is_swappable, int> = 0> + void swap(Archetype&, Archetype&) = delete; // NOLINT + + // A conditionally-noexcept swap implementation for Archetype when the profile + // supports swap. + template::is_swappable, int> = 0> + void swap(Archetype& lhs, Archetype& rhs) // NOLINT + noexcept(PropertiesOfT::swappable_support != swappable::yes) + { + std::swap(lhs.archetype_state, rhs.archetype_state); + } + + // A convertible-to-bool type that is used as the return type of comparison + // operators since the standard doesn't always require exactly bool. + struct NothrowBool + { + explicit NothrowBool() = delete; + ~NothrowBool() = default; + + // TODO(calabrese) Delete the copy constructor in C++17 mode since guaranteed + // elision makes it not required when returning from a function. 
+ // NothrowBool(NothrowBool const&) = delete; + + NothrowBool& operator=(NothrowBool const&) = delete; + + explicit operator bool() const noexcept + { + return value; + } + + static NothrowBool make(bool const value) noexcept + { + return NothrowBool(value); + } + + private: + explicit NothrowBool(bool const value) noexcept : + value(value) + { + } + + bool value; + }; + + // A convertible-to-bool type that is used as the return type of comparison + // operators since the standard doesn't always require exactly bool. + // Note: ExceptionalBool has a conversion operator that is not noexcept, so + // that even when a comparison operator is noexcept, that operation may still + // potentially throw when converted to bool. + struct ExceptionalBool + { + explicit ExceptionalBool() = delete; + ~ExceptionalBool() = default; + + // TODO(calabrese) Delete the copy constructor in C++17 mode since guaranteed + // elision makes it not required when returning from a function. + // ExceptionalBool(ExceptionalBool const&) = delete; + + ExceptionalBool& operator=(ExceptionalBool const&) = delete; + + explicit operator bool() const + { + return value; + } // NOLINT + + static ExceptionalBool make(bool const value) noexcept + { + return ExceptionalBool(value); + } + + private: + explicit ExceptionalBool(bool const value) noexcept : + value(value) + { + } + + bool value; + }; + +// The following macro is only used as a helper in this file to stamp out +// comparison operator definitions. It is undefined after usage. +// +// NOTE: Non-nothrow operators throw via their result's conversion to bool even +// though the operation itself is noexcept. 
+#define ABSL_TYPES_INTERNAL_OP(enum_name, op) \ + template \ + absl::enable_if_t::is_##enum_name, bool> operator op( \ + const Archetype&, const Archetype& \ + ) = delete; \ + \ + template \ + typename absl::enable_if_t< \ + PropertiesOfT::is_##enum_name, \ + std::conditional::enum_name##_support == enum_name::nothrow, NothrowBool, ExceptionalBool>>::type \ + operator op(const Archetype& lhs, const Archetype& rhs) noexcept \ + { \ + return absl::conditional_t< \ + PropertiesOfT::enum_name##_support == enum_name::nothrow, \ + NothrowBool, \ + ExceptionalBool>::make(lhs.archetype_state op \ + rhs.archetype_state); \ + } + + ABSL_TYPES_INTERNAL_OP(equality_comparable, ==); + ABSL_TYPES_INTERNAL_OP(inequality_comparable, !=); + ABSL_TYPES_INTERNAL_OP(less_than_comparable, <); + ABSL_TYPES_INTERNAL_OP(less_equal_comparable, <=); + ABSL_TYPES_INTERNAL_OP(greater_equal_comparable, >=); + ABSL_TYPES_INTERNAL_OP(greater_than_comparable, >); + +#undef ABSL_TYPES_INTERNAL_OP + + // Base class for std::hash specializations when an Archetype doesn't support + // hashing. + struct PoisonedHash + { + PoisonedHash() = delete; + PoisonedHash(const PoisonedHash&) = delete; + PoisonedHash& operator=(const PoisonedHash&) = delete; + }; + + // Base class for std::hash specializations when an Archetype supports hashing. 
+ template + struct EnabledHash + { + using argument_type = Archetype; + using result_type = std::size_t; + result_type operator()(const argument_type& arg) const + { + return std::hash()(arg.archetype_state); + } + }; + + } // namespace types_internal + ABSL_NAMESPACE_END +} // namespace absl + +namespace std +{ + + template // NOLINT + struct hash<::absl::types_internal::Archetype> : conditional<::absl::types_internal::PropertiesOfT::is_hashable, ::absl::types_internal::EnabledHash, ::absl::types_internal::PoisonedHash>::type + { + }; + +} // namespace std + +#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_ARCHETYPE_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/conformance_profile.h b/CAPI/cpp/grpc/include/absl/types/internal/conformance_profile.h new file mode 100644 index 00000000..932f498f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/conformance_profile.h @@ -0,0 +1,1001 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// conformance_profiles.h +// ----------------------------------------------------------------------------- +// +// This file contains templates for representing "Regularity Profiles" and +// concisely-named versions of commonly used Regularity Profiles. 
+// +// A Regularity Profile is a compile-time description of the types of operations +// that a given type supports, along with properties of those operations when +// they do exist. For instance, a Regularity Profile may describe a type that +// has a move-constructor that is noexcept and a copy constructor that is not +// noexcept. This description can then be examined and passed around to other +// templates for the purposes of asserting expectations on user-defined types +// via a series trait checks, or for determining what kinds of run-time tests +// are able to be performed. +// +// Regularity Profiles are also used when creating "archetypes," which are +// minimum-conforming types that meet all of the requirements of a given +// Regularity Profile. For more information regarding archetypes, see +// "conformance_archetypes.h". + +#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_PROFILE_H_ +#define ABSL_TYPES_INTERNAL_CONFORMANCE_PROFILE_H_ + +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/algorithm/container.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/ascii.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/types/internal/conformance_testing_helpers.h" +#include "absl/utility/utility.h" + +// TODO(calabrese) Add support for extending profiles. + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace types_internal + { + + // Converts an enum to its underlying integral value. + template + constexpr absl::underlying_type_t UnderlyingValue(Enum value) + { + return static_cast>(value); + } + + // A tag type used in place of a matcher when checking that an assertion result + // does not actually contain any errors. 
+ struct NoError + { + }; + + // ----------------------------------------------------------------------------- + // ConformanceErrors + // ----------------------------------------------------------------------------- + class ConformanceErrors + { + public: + // Setup the error reporting mechanism by seeding it with the name of the type + // that is being tested. + explicit ConformanceErrors(std::string type_name) : + assertion_result_(false), + type_name_(std::move(type_name)) + { + assertion_result_ << "\n\n" + "Assuming the following type alias:\n" + "\n" + " using _T = " + << type_name_ << ";\n\n"; + outputDivider(); + } + + // Adds the test name to the list of successfully run tests iff it was not + // previously reported as failing. This behavior is useful for tests that + // have multiple parts, where failures and successes are reported individually + // with the same test name. + void addTestSuccess(absl::string_view test_name) + { + auto normalized_test_name = absl::AsciiStrToLower(test_name); + + // If the test is already reported as failing, do not add it to the list of + // successes. + if (test_failures_.find(normalized_test_name) == test_failures_.end()) + { + test_successes_.insert(std::move(normalized_test_name)); + } + } + + // Streams a single error description into the internal buffer (a visual + // divider is automatically inserted after the error so that multiple errors + // are visibly distinct). + // + // This function increases the error count by 1. + // + // TODO(calabrese) Determine desired behavior when if this function throws. + template + void addTestFailure(absl::string_view test_name, const P&... args) + { + // Output a message related to the test failure. 
+ assertion_result_ << "\n\n" + "Failed test: " + << test_name << "\n\n"; + addTestFailureImpl(args...); + assertion_result_ << "\n\n"; + outputDivider(); + + auto normalized_test_name = absl::AsciiStrToLower(test_name); + + // If previous parts of this test succeeded, remove it from that set. + test_successes_.erase(normalized_test_name); + + // Add the test name to the list of failed tests. + test_failures_.insert(std::move(normalized_test_name)); + + has_error_ = true; + } + + // Convert this object into a testing::AssertionResult instance such that it + // can be used with gtest. + ::testing::AssertionResult assertionResult() const + { + return has_error_ ? assertion_result_ : ::testing::AssertionSuccess(); + } + + // Convert this object into a testing::AssertionResult instance such that it + // can be used with gtest. This overload expects errors, using the specified + // matcher. + ::testing::AssertionResult expectFailedTests( + const std::set& test_names + ) const + { + // Since we are expecting nonconformance, output an error message when the + // type actually conformed to the specified profile. + if (!has_error_) + { + return ::testing::AssertionFailure() + << "Unexpected conformance of type:\n" + " " + << type_name_ << "\n\n"; + } + + // Get a list of all expected failures that did not actually fail + // (or that were not run). + std::vector nonfailing_tests; + absl::c_set_difference(test_names, test_failures_, std::back_inserter(nonfailing_tests)); + + // Get a list of all "expected failures" that were never actually run. + std::vector unrun_tests; + absl::c_set_difference(nonfailing_tests, test_successes_, std::back_inserter(unrun_tests)); + + // Report when the user specified tests that were not run. + if (!unrun_tests.empty()) + { + const bool tests_were_run = + !(test_failures_.empty() && test_successes_.empty()); + + // Prepare an assertion result used in the case that tests pass that were + // expected to fail. 
+ ::testing::AssertionResult result = ::testing::AssertionFailure(); + result << "When testing type:\n " << type_name_ + << "\n\nThe following tests were expected to fail but were not " + "run"; + + if (tests_were_run) + result << " (was the test name spelled correctly?)"; + + result << ":\n\n"; + + // List all of the tests that unexpectedly passed. + for (const auto& test_name : unrun_tests) + { + result << " " << test_name << "\n"; + } + + if (!tests_were_run) + result << "\nNo tests were run."; + + if (!test_failures_.empty()) + { + // List test failures + result << "\nThe tests that were run and failed are:\n\n"; + for (const auto& test_name : test_failures_) + { + result << " " << test_name << "\n"; + } + } + + if (!test_successes_.empty()) + { + // List test successes + result << "\nThe tests that were run and succeeded are:\n\n"; + for (const auto& test_name : test_successes_) + { + result << " " << test_name << "\n"; + } + } + + return result; + } + + // If some tests passed when they were expected to fail, alert the caller. + if (nonfailing_tests.empty()) + return ::testing::AssertionSuccess(); + + // Prepare an assertion result used in the case that tests pass that were + // expected to fail. + ::testing::AssertionResult unexpected_successes = + ::testing::AssertionFailure(); + unexpected_successes << "When testing type:\n " << type_name_ + << "\n\nThe following tests passed when they were " + "expected to fail:\n\n"; + + // List all of the tests that unexpectedly passed. + for (const auto& test_name : nonfailing_tests) + { + unexpected_successes << " " << test_name << "\n"; + } + + return unexpected_successes; + } + + private: + void outputDivider() + { + assertion_result_ << "========================================"; + } + + void addTestFailureImpl() + { + } + + template + void addTestFailureImpl(const H& head, const T&... 
tail) + { + assertion_result_ << head; + addTestFailureImpl(tail...); + } + + ::testing::AssertionResult assertion_result_; + std::set test_failures_; + std::set test_successes_; + std::string type_name_; + bool has_error_ = false; + }; + + template + struct PropertiesOfImpl + { + }; + + template + struct PropertiesOfImpl> + { + using type = typename T::properties; + }; + + template + struct PropertiesOfImpl> + { + using type = typename PropertiesOfImpl::type; + }; + + template + struct PropertiesOf : PropertiesOfImpl + { + }; + + template + using PropertiesOfT = typename PropertiesOf::type; + + // NOTE: These enums use this naming convention to be consistent with the + // standard trait names, which is useful since it allows us to match up each + // enum name with a corresponding trait name in macro definitions. + + // An enum that describes the various expectations on an operations existence. + enum class function_support + { + maybe, + yes, + nothrow, + trivial + }; + + constexpr const char* PessimisticPropertyDescription(function_support v) + { + return v == function_support::maybe ? "no" : v == function_support::yes ? "yes, potentially throwing" : + v == function_support::nothrow ? "yes, nothrow" : + "yes, trivial"; + } + + // Return a string that describes the kind of property support that was + // expected. 
+ inline std::string ExpectedFunctionKindList(function_support min, function_support max) + { + if (min == max) + { + std::string result = + absl::StrCat("Expected:\n ", PessimisticPropertyDescription(static_cast(UnderlyingValue(min))), "\n"); + return result; + } + + std::string result = "Expected one of:\n"; + for (auto curr_support = UnderlyingValue(min); + curr_support <= UnderlyingValue(max); + ++curr_support) + { + absl::StrAppend(&result, " ", PessimisticPropertyDescription(static_cast(curr_support)), "\n"); + } + + return result; + } + + template + void ExpectModelOfImpl(ConformanceErrors* errors, Enum min_support, Enum max_support, Enum kind) + { + const auto kind_value = UnderlyingValue(kind); + const auto min_support_value = UnderlyingValue(min_support); + const auto max_support_value = UnderlyingValue(max_support); + + if (!(kind_value >= min_support_value && kind_value <= max_support_value)) + { + errors->addTestFailure( + PropertyName(kind), "**Failed property expectation**\n\n", ExpectedFunctionKindList(static_cast(min_support_value), static_cast(max_support_value)), '\n', "Actual:\n ", PessimisticPropertyDescription(static_cast(kind_value)) + ); + } + else + { + errors->addTestSuccess(PropertyName(kind)); + } + } + +#define ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM(description, name) \ + enum class name \ + { \ + maybe, \ + yes, \ + nothrow, \ + trivial \ + }; \ + \ + constexpr const char* PropertyName(name v) \ + { \ + return description; \ + } \ + static_assert(true, "") // Force a semicolon when using this macro. 
+ + ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for default construction", default_constructible); + ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for move construction", move_constructible); + ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for copy construction", copy_constructible); + ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for move assignment", move_assignable); + ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for copy assignment", copy_assignable); + ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM("support for destruction", destructible); + +#undef ABSL_INTERNAL_SPECIAL_MEMBER_FUNCTION_ENUM + +#define ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM(description, name) \ + enum class name \ + { \ + maybe, \ + yes, \ + nothrow \ + }; \ + \ + constexpr const char* PropertyName(name v) \ + { \ + return description; \ + } \ + static_assert(true, "") // Force a semicolon when using this macro. + + ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for ==", equality_comparable); + ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for !=", inequality_comparable); + ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for <", less_than_comparable); + ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for <=", less_equal_comparable); + ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for >=", greater_equal_comparable); + ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for >", greater_than_comparable); + + ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM("support for swap", swappable); + +#undef ABSL_INTERNAL_INTRINSIC_FUNCTION_ENUM + + enum class hashable + { + maybe, + yes + }; + + constexpr const char* PropertyName(hashable v) + { + return "support for std::hash"; + } + + template + using AlwaysFalse = std::false_type; + +#define ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(name, property) \ + template \ + constexpr property property##_support_of() \ + { \ + return std::is_##property::value ? std::is_nothrow_##property::value ? absl::is_trivially_##property::value ? 
property::trivial : property::nothrow : property::yes : property::maybe; \ + } \ + \ + template \ + void ExpectModelOf##name(ConformanceErrors* errors) \ + { \ + (ExpectModelOfImpl)(errors, PropertiesOfT::property##_support, PropertiesOfT::property##_support, property##_support_of()); \ + } + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(DefaultConstructible, default_constructible); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(MoveConstructible, move_constructible); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(CopyConstructible, copy_constructible); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(MoveAssignable, move_assignable); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(CopyAssignable, copy_assignable); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER(Destructible, destructible); + +#undef ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_SPECIAL_MEMBER + + void BoolFunction(bool) noexcept; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction for checking if an operation exists through SFINAE. + // + // `T` is the type to test and Op is an alias containing the expression to test. + template class Op, class = void> + struct IsOpableImpl : std::false_type + { + }; + + template class Op> + struct IsOpableImpl>> : std::true_type + { + }; + + template class Op> + struct IsOpable + { + template + using apply = typename IsOpableImpl::type; + }; + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction for checking if an operation exists and is also noexcept + // through SFINAE and the noexcept operator. + /// + // `T` is the type to test and Op is an alias containing the expression to test. 
+ template class Op, class = void> + struct IsNothrowOpableImpl : std::false_type + { + }; + + template class Op> + struct IsNothrowOpableImpl::value>> : std::true_type + { + }; + + template class Op> + struct IsNothrowOpable + { + template + using apply = typename IsNothrowOpableImpl::type; + }; +// +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +// +// A macro that produces the necessary function for reporting what kind of +// support a specific comparison operation has and a function for reporting an +// error if a given type's support for that operation does not meet the expected +// requirements. +#define ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(name, property, op) \ + template() op std::declval()))>> \ + using name = Result; \ + \ + template \ + constexpr property property##_support_of() \ + { \ + return IsOpable::apply::value ? IsNothrowOpable::apply::value ? property::nothrow : property::yes : property::maybe; \ + } \ + \ + template \ + void ExpectModelOf##name(ConformanceErrors* errors) \ + { \ + (ExpectModelOfImpl)(errors, PropertiesOfT::property##_support, PropertiesOfT::property##_support, property##_support_of()); \ + } + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + // + // Generate the necessary support-checking and error reporting functions for + // each of the comparison operators. 
+ ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(EqualityComparable, equality_comparable, ==); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(InequalityComparable, inequality_comparable, !=); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(LessThanComparable, less_than_comparable, <); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(LessEqualComparable, less_equal_comparable, <=); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(GreaterEqualComparable, greater_equal_comparable, >=); + + ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON(GreaterThanComparable, greater_than_comparable, >); + +#undef ABSL_INTERNAL_PESSIMISTIC_MODEL_OF_COMPARISON + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + // + // The necessary support-checking and error-reporting functions for swap. + template + constexpr swappable swappable_support_of() + { + return type_traits_internal::IsSwappable::value ? type_traits_internal::IsNothrowSwappable::value ? swappable::nothrow : swappable::yes : swappable::maybe; + } + + template + void ExpectModelOfSwappable(ConformanceErrors* errors) + { + (ExpectModelOfImpl)(errors, PropertiesOfT::swappable_support, PropertiesOfT::swappable_support, swappable_support_of()); + } + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + // + // The necessary support-checking and error-reporting functions for std::hash. + template + constexpr hashable hashable_support_of() + { + return type_traits_internal::IsHashable::value ? 
hashable::yes : hashable::maybe; + } + + template + void ExpectModelOfHashable(ConformanceErrors* errors) + { + (ExpectModelOfImpl)(errors, PropertiesOfT::hashable_support, PropertiesOfT::hashable_support, hashable_support_of()); + } + // + //////////////////////////////////////////////////////////////////////////////// + + template< + default_constructible DefaultConstructibleValue = + default_constructible::maybe, + move_constructible MoveConstructibleValue = move_constructible::maybe, + copy_constructible CopyConstructibleValue = copy_constructible::maybe, + move_assignable MoveAssignableValue = move_assignable::maybe, + copy_assignable CopyAssignableValue = copy_assignable::maybe, + destructible DestructibleValue = destructible::maybe, + equality_comparable EqualityComparableValue = equality_comparable::maybe, + inequality_comparable InequalityComparableValue = + inequality_comparable::maybe, + less_than_comparable LessThanComparableValue = less_than_comparable::maybe, + less_equal_comparable LessEqualComparableValue = + less_equal_comparable::maybe, + greater_equal_comparable GreaterEqualComparableValue = + greater_equal_comparable::maybe, + greater_than_comparable GreaterThanComparableValue = + greater_than_comparable::maybe, + swappable SwappableValue = swappable::maybe, + hashable HashableValue = hashable::maybe> + struct ConformanceProfile + { + using properties = ConformanceProfile; + + static constexpr default_constructible + default_constructible_support = // NOLINT + DefaultConstructibleValue; + + static constexpr move_constructible move_constructible_support = // NOLINT + MoveConstructibleValue; + + static constexpr copy_constructible copy_constructible_support = // NOLINT + CopyConstructibleValue; + + static constexpr move_assignable move_assignable_support = // NOLINT + MoveAssignableValue; + + static constexpr copy_assignable copy_assignable_support = // NOLINT + CopyAssignableValue; + + static constexpr destructible destructible_support = // 
NOLINT + DestructibleValue; + + static constexpr equality_comparable equality_comparable_support = // NOLINT + EqualityComparableValue; + + static constexpr inequality_comparable + inequality_comparable_support = // NOLINT + InequalityComparableValue; + + static constexpr less_than_comparable + less_than_comparable_support = // NOLINT + LessThanComparableValue; + + static constexpr less_equal_comparable + less_equal_comparable_support = // NOLINT + LessEqualComparableValue; + + static constexpr greater_equal_comparable + greater_equal_comparable_support = // NOLINT + GreaterEqualComparableValue; + + static constexpr greater_than_comparable + greater_than_comparable_support = // NOLINT + GreaterThanComparableValue; + + static constexpr swappable swappable_support = SwappableValue; // NOLINT + + static constexpr hashable hashable_support = HashableValue; // NOLINT + + static constexpr bool is_default_constructible = // NOLINT + DefaultConstructibleValue != default_constructible::maybe; + + static constexpr bool is_move_constructible = // NOLINT + MoveConstructibleValue != move_constructible::maybe; + + static constexpr bool is_copy_constructible = // NOLINT + CopyConstructibleValue != copy_constructible::maybe; + + static constexpr bool is_move_assignable = // NOLINT + MoveAssignableValue != move_assignable::maybe; + + static constexpr bool is_copy_assignable = // NOLINT + CopyAssignableValue != copy_assignable::maybe; + + static constexpr bool is_destructible = // NOLINT + DestructibleValue != destructible::maybe; + + static constexpr bool is_equality_comparable = // NOLINT + EqualityComparableValue != equality_comparable::maybe; + + static constexpr bool is_inequality_comparable = // NOLINT + InequalityComparableValue != inequality_comparable::maybe; + + static constexpr bool is_less_than_comparable = // NOLINT + LessThanComparableValue != less_than_comparable::maybe; + + static constexpr bool is_less_equal_comparable = // NOLINT + LessEqualComparableValue != 
less_equal_comparable::maybe; + + static constexpr bool is_greater_equal_comparable = // NOLINT + GreaterEqualComparableValue != greater_equal_comparable::maybe; + + static constexpr bool is_greater_than_comparable = // NOLINT + GreaterThanComparableValue != greater_than_comparable::maybe; + + static constexpr bool is_swappable = // NOLINT + SwappableValue != swappable::maybe; + + static constexpr bool is_hashable = // NOLINT + HashableValue != hashable::maybe; + }; + + //////////////////////////////////////////////////////////////////////////////// + // + // Compliant SFINAE-friendliness is not always present on the standard library + // implementations that we support. This helper-struct (and associated enum) is + // used as a means to conditionally check the hashability support of a type. + enum class CheckHashability + { + no, + yes + }; + + template + struct conservative_hashable_support_of; + + template + struct conservative_hashable_support_of + { + static constexpr hashable Invoke() + { + return hashable::maybe; + } + }; + + template + struct conservative_hashable_support_of + { + static constexpr hashable Invoke() + { + return hashable_support_of(); + } + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // The ConformanceProfile that is expected based on introspection into the type + // by way of trait checks. 
+ template + struct SyntacticConformanceProfileOf + { + using properties = ConformanceProfile< + default_constructible_support_of(), + move_constructible_support_of(), + copy_constructible_support_of(), + move_assignable_support_of(), + copy_assignable_support_of(), + destructible_support_of(), + equality_comparable_support_of(), + inequality_comparable_support_of(), + less_than_comparable_support_of(), + less_equal_comparable_support_of(), + greater_equal_comparable_support_of(), + greater_than_comparable_support_of(), + swappable_support_of(), + conservative_hashable_support_of::Invoke()>; + }; + +#define ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(type, name) \ + template \ + constexpr type ConformanceProfile< \ + DefaultConstructibleValue, \ + MoveConstructibleValue, \ + CopyConstructibleValue, \ + MoveAssignableValue, \ + CopyAssignableValue, \ + DestructibleValue, \ + EqualityComparableValue, \ + InequalityComparableValue, \ + LessThanComparableValue, \ + LessEqualComparableValue, \ + GreaterEqualComparableValue, \ + GreaterThanComparableValue, \ + SwappableValue, \ + HashableValue>::name + +#define ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(type) \ + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(type, type##_support); \ + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(bool, is_##type) + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(default_constructible); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(move_constructible); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(copy_constructible); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(move_assignable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(copy_assignable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(destructible); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(equality_comparable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(inequality_comparable); + 
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(less_than_comparable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(less_equal_comparable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_equal_comparable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_than_comparable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(swappable); + ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(hashable); +#endif + +#undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF +#undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL + + // Retrieve the enum with the minimum underlying value. + // Note: std::min is not constexpr in C++11, which is why this is necessary. + template + constexpr H MinEnum(H head) + { + return head; + } + + template + constexpr H MinEnum(H head, N next, T... tail) + { + return (UnderlyingValue)(head) < (UnderlyingValue)(next) ? (MinEnum)(head, tail...) : (MinEnum)(next, tail...); + } + + template + struct MinimalProfiles + { + static constexpr default_constructible + default_constructible_support = // NOLINT + (MinEnum)(PropertiesOfT::default_constructible_support...); + + static constexpr move_constructible move_constructible_support = // NOLINT + (MinEnum)(PropertiesOfT::move_constructible_support...); + + static constexpr copy_constructible copy_constructible_support = // NOLINT + (MinEnum)(PropertiesOfT::copy_constructible_support...); + + static constexpr move_assignable move_assignable_support = // NOLINT + (MinEnum)(PropertiesOfT::move_assignable_support...); + + static constexpr copy_assignable copy_assignable_support = // NOLINT + (MinEnum)(PropertiesOfT::copy_assignable_support...); + + static constexpr destructible destructible_support = // NOLINT + (MinEnum)(PropertiesOfT::destructible_support...); + + static constexpr equality_comparable equality_comparable_support = // NOLINT + (MinEnum)(PropertiesOfT::equality_comparable_support...); + + static constexpr inequality_comparable + 
inequality_comparable_support = // NOLINT + (MinEnum)(PropertiesOfT::inequality_comparable_support...); + + static constexpr less_than_comparable + less_than_comparable_support = // NOLINT + (MinEnum)(PropertiesOfT::less_than_comparable_support...); + + static constexpr less_equal_comparable + less_equal_comparable_support = // NOLINT + (MinEnum)(PropertiesOfT::less_equal_comparable_support...); + + static constexpr greater_equal_comparable + greater_equal_comparable_support = // NOLINT + (MinEnum)(PropertiesOfT::greater_equal_comparable_support...); + + static constexpr greater_than_comparable + greater_than_comparable_support = // NOLINT + (MinEnum)(PropertiesOfT::greater_than_comparable_support...); + + static constexpr swappable swappable_support = // NOLINT + (MinEnum)(PropertiesOfT::swappable_support...); + + static constexpr hashable hashable_support = // NOLINT + (MinEnum)(PropertiesOfT::hashable_support...); + + using properties = ConformanceProfile< + default_constructible_support, + move_constructible_support, + copy_constructible_support, + move_assignable_support, + copy_assignable_support, + destructible_support, + equality_comparable_support, + inequality_comparable_support, + less_than_comparable_support, + less_equal_comparable_support, + greater_equal_comparable_support, + greater_than_comparable_support, + swappable_support, + hashable_support>; + }; + + // Retrieve the enum with the greatest underlying value. + // Note: std::max is not constexpr in C++11, which is why this is necessary. + template + constexpr H MaxEnum(H head) + { + return head; + } + + template + constexpr H MaxEnum(H head, N next, T... tail) + { + return (UnderlyingValue)(next) < (UnderlyingValue)(head) ? (MaxEnum)(head, tail...) 
: (MaxEnum)(next, tail...); + } + + template + struct CombineProfilesImpl + { + static constexpr default_constructible + default_constructible_support = // NOLINT + (MaxEnum)(PropertiesOfT::default_constructible_support...); + + static constexpr move_constructible move_constructible_support = // NOLINT + (MaxEnum)(PropertiesOfT::move_constructible_support...); + + static constexpr copy_constructible copy_constructible_support = // NOLINT + (MaxEnum)(PropertiesOfT::copy_constructible_support...); + + static constexpr move_assignable move_assignable_support = // NOLINT + (MaxEnum)(PropertiesOfT::move_assignable_support...); + + static constexpr copy_assignable copy_assignable_support = // NOLINT + (MaxEnum)(PropertiesOfT::copy_assignable_support...); + + static constexpr destructible destructible_support = // NOLINT + (MaxEnum)(PropertiesOfT::destructible_support...); + + static constexpr equality_comparable equality_comparable_support = // NOLINT + (MaxEnum)(PropertiesOfT::equality_comparable_support...); + + static constexpr inequality_comparable + inequality_comparable_support = // NOLINT + (MaxEnum)(PropertiesOfT::inequality_comparable_support...); + + static constexpr less_than_comparable + less_than_comparable_support = // NOLINT + (MaxEnum)(PropertiesOfT::less_than_comparable_support...); + + static constexpr less_equal_comparable + less_equal_comparable_support = // NOLINT + (MaxEnum)(PropertiesOfT::less_equal_comparable_support...); + + static constexpr greater_equal_comparable + greater_equal_comparable_support = // NOLINT + (MaxEnum)(PropertiesOfT::greater_equal_comparable_support...); + + static constexpr greater_than_comparable + greater_than_comparable_support = // NOLINT + (MaxEnum)(PropertiesOfT::greater_than_comparable_support...); + + static constexpr swappable swappable_support = // NOLINT + (MaxEnum)(PropertiesOfT::swappable_support...); + + static constexpr hashable hashable_support = // NOLINT + (MaxEnum)(PropertiesOfT::hashable_support...); + + 
using properties = ConformanceProfile< + default_constructible_support, + move_constructible_support, + copy_constructible_support, + move_assignable_support, + copy_assignable_support, + destructible_support, + equality_comparable_support, + inequality_comparable_support, + less_than_comparable_support, + less_equal_comparable_support, + greater_equal_comparable_support, + greater_than_comparable_support, + swappable_support, + hashable_support>; + }; + + // NOTE: We use this as opposed to a direct alias of CombineProfilesImpl so that + // when named aliases of CombineProfiles are created (such as in + // conformance_aliases.h), we only pay for the combination algorithm on the + // profiles that are actually used. + template + struct CombineProfiles + { + using profile_alias_of = CombineProfilesImpl; + }; + + template<> + struct CombineProfiles<> + { + using properties = ConformanceProfile<>; + }; + + template + struct StrongProfileTypedef + { + using properties = PropertiesOfT; + }; + + template + struct IsProfileImpl : std::false_type + { + }; + + template + struct IsProfileImpl>> : std::true_type + { + }; + + template + struct IsProfile : IsProfileImpl::type + { + }; + + // A tag that describes which set of properties we will check when the user + // requires a strict match in conformance (as opposed to a loose match which + // allows more-refined support of any given operation). + // + // Currently only the RegularityDomain exists and it includes all operations + // that the conformance testing suite knows about. The intent is that if the + // suite is expanded to support extension, such as for checking conformance of + // concepts like Iterators or Containers, additional corresponding domains can + // be created. 
+ struct RegularityDomain + { + }; + + } // namespace types_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_PROFILE_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/conformance_testing.h b/CAPI/cpp/grpc/include/absl/types/internal/conformance_testing.h new file mode 100644 index 00000000..14f18431 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/conformance_testing.h @@ -0,0 +1,1394 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// conformance_testing.h +// ----------------------------------------------------------------------------- +// + +#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_H_ +#define ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_H_ + +//////////////////////////////////////////////////////////////////////////////// +// // +// Many templates in this file take a `T` and a `Prof` type as explicit // +// template arguments. These are a type to be checked and a // +// "Regularity Profile" that describes what operations that type `T` is // +// expected to support. See "regularity_profiles.h" for more details // +// regarding Regularity Profiles. 
// +// // +//////////////////////////////////////////////////////////////////////////////// + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/ascii.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/types/internal/conformance_aliases.h" +#include "absl/types/internal/conformance_archetype.h" +#include "absl/types/internal/conformance_profile.h" +#include "absl/types/internal/conformance_testing_helpers.h" +#include "absl/types/internal/parentheses.h" +#include "absl/types/internal/transform_args.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace types_internal + { + + // Returns true if the compiler incorrectly greedily instantiates constexpr + // templates in any unevaluated context. + constexpr bool constexpr_instantiation_when_unevaluated() + { +#if defined(__apple_build_version__) // TODO(calabrese) Make more specific + return true; +#elif defined(__clang__) + return __clang_major__ < 4; +#elif defined(__GNUC__) + // TODO(calabrese) Figure out why gcc 7 fails (seems like a different bug) + return __GNUC__ < 5 || (__GNUC__ == 5 && __GNUC_MINOR__ < 2) || __GNUC__ >= 7; +#else + return false; +#endif + } + + // Returns true if the standard library being used incorrectly produces an error + // when instantiating the definition of a poisoned std::hash specialization. + constexpr bool poisoned_hash_fails_instantiation() + { +#if defined(_MSC_VER) && !defined(_LIBCPP_VERSION) + return _MSC_VER < 1914; +#else + return false; +#endif + } + + template + struct GeneratorType + { + decltype(std::declval()()) operator()() const + noexcept(noexcept(std::declval()())) + { + return fun(); + } + + Fun fun; + const char* description; + }; + + // A "make" function for the GeneratorType template that deduces the function + // object type. 
+ template::value>** = nullptr> + GeneratorType Generator(Fun fun, const char* description) + { + return GeneratorType{absl::move(fun), description}; + } + + // A type that contains a set of nullary function objects that each return an + // instance of the same type and value (though possibly different + // representations, such as +0 and -0 or two vectors with the same elements but + // with different capacities). + template + struct EquivalenceClassType + { + std::tuple...> generators; + }; + + // A "make" function for the EquivalenceClassType template that deduces the + // function object types and is constrained such that a user can only pass in + // function objects that all have the same return type. + template::value>** = nullptr> + EquivalenceClassType EquivalenceClass(GeneratorType... funs) + { + return {std::make_tuple(absl::move(funs)...)}; + } + + // A type that contains an ordered series of EquivalenceClassTypes, from + // smallest value to largest value. + template + struct OrderedEquivalenceClasses + { + std::tuple eq_classes; + }; + + // An object containing the parts of a given (name, initialization expression), + // and is capable of generating a string that describes the given. + struct GivenDeclaration + { + std::string outputDeclaration(std::size_t width) const + { + const std::size_t indent_size = 2; + std::string result = absl::StrCat(" ", name); + + if (!expression.empty()) + { + // Indent + result.resize(indent_size + width, ' '); + absl::StrAppend(&result, " = ", expression, ";\n"); + } + else + { + absl::StrAppend(&result, ";\n"); + } + + return result; + } + + std::string name; + std::string expression; + }; + + // Produce a string that contains all of the givens of an error report. + template + std::string PrepareGivenContext(const Decls&... 
decls) + { + const std::size_t width = (std::max)({decls.name.size()...}); + return absl::StrCat("Given:\n", decls.outputDeclaration(width)..., "\n"); + } + + //////////////////////////////////////////////////////////////////////////////// + // Function objects that perform a check for each comparison operator // + //////////////////////////////////////////////////////////////////////////////// + +#define ABSL_INTERNAL_EXPECT_OP(name, op) \ + struct Expect##name \ + { \ + template \ + void operator()(absl::string_view test_name, absl::string_view context, const T& lhs, const T& rhs, absl::string_view lhs_name, absl::string_view rhs_name) const \ + { \ + if (!static_cast(lhs op rhs)) \ + { \ + errors->addTestFailure( \ + test_name, absl::StrCat(context, "**Unexpected comparison result**\n" \ + "\n" \ + "Expression:\n" \ + " ", \ + lhs_name, \ + " " #op " ", \ + rhs_name, \ + "\n" \ + "\n" \ + "Expected: true\n" \ + " Actual: false") \ + ); \ + } \ + else \ + { \ + errors->addTestSuccess(test_name); \ + } \ + } \ + \ + ConformanceErrors* errors; \ + }; \ + \ + struct ExpectNot##name \ + { \ + template \ + void operator()(absl::string_view test_name, absl::string_view context, const T& lhs, const T& rhs, absl::string_view lhs_name, absl::string_view rhs_name) const \ + { \ + if (lhs op rhs) \ + { \ + errors->addTestFailure( \ + test_name, absl::StrCat(context, "**Unexpected comparison result**\n" \ + "\n" \ + "Expression:\n" \ + " ", \ + lhs_name, \ + " " #op " ", \ + rhs_name, \ + "\n" \ + "\n" \ + "Expected: false\n" \ + " Actual: true") \ + ); \ + } \ + else \ + { \ + errors->addTestSuccess(test_name); \ + } \ + } \ + \ + ConformanceErrors* errors; \ + } + + ABSL_INTERNAL_EXPECT_OP(Eq, ==); + ABSL_INTERNAL_EXPECT_OP(Ne, !=); + ABSL_INTERNAL_EXPECT_OP(Lt, <); + ABSL_INTERNAL_EXPECT_OP(Le, <=); + ABSL_INTERNAL_EXPECT_OP(Ge, >=); + ABSL_INTERNAL_EXPECT_OP(Gt, >); + +#undef ABSL_INTERNAL_EXPECT_OP + + // A function object that verifies that two objects hash to the same 
value by + // way of the std::hash specialization. + struct ExpectSameHash + { + template + void operator()(absl::string_view test_name, absl::string_view context, const T& lhs, const T& rhs, absl::string_view lhs_name, absl::string_view rhs_name) const + { + if (std::hash()(lhs) != std::hash()(rhs)) + { + errors->addTestFailure( + test_name, absl::StrCat(context, "**Unexpected hash result**\n" + "\n" + "Expression:\n" + " std::hash()(", + lhs_name, + ") == std::hash()(", + rhs_name, + ")\n" + "\n" + "Expected: true\n" + " Actual: false") + ); + } + else + { + errors->addTestSuccess(test_name); + } + } + + ConformanceErrors* errors; + }; + + // A function template that takes two objects and verifies that each comparison + // operator behaves in a way that is consistent with equality. It has "OneWay" + // in the name because the first argument will always be the left-hand operand + // of the corresponding comparison operator and the second argument will + // always be the right-hand operand. It will never switch that order. + // At a higher level in the test suite, the one-way form is called once for each + // of the two possible orders whenever lhs and rhs are not the same initializer. 
+ template + void ExpectOneWayEquality(ConformanceErrors* errors, absl::string_view test_name, absl::string_view context, const T& lhs, const T& rhs, absl::string_view lhs_name, absl::string_view rhs_name) + { + If::is_equality_comparable>::Invoke( + ExpectEq{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name + ); + + If::is_inequality_comparable>::Invoke( + ExpectNotNe{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name + ); + + If::is_less_than_comparable>::Invoke( + ExpectNotLt{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name + ); + + If::is_less_equal_comparable>::Invoke( + ExpectLe{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name + ); + + If::is_greater_equal_comparable>::Invoke( + ExpectGe{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name + ); + + If::is_greater_than_comparable>::Invoke( + ExpectNotGt{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name + ); + + If::is_hashable>::Invoke( + ExpectSameHash{errors}, test_name, context, lhs, rhs, lhs_name, rhs_name + ); + } + + // A function template that takes two objects and verifies that each comparison + // operator behaves in a way that is consistent with equality. This function + // differs from ExpectOneWayEquality in that this will do checks with argument + // order reversed in addition to in-order. + template + void ExpectEquality(ConformanceErrors* errors, absl::string_view test_name, absl::string_view context, const T& lhs, const T& rhs, absl::string_view lhs_name, absl::string_view rhs_name) + { + (ExpectOneWayEquality)(errors, test_name, context, lhs, rhs, lhs_name, rhs_name); + (ExpectOneWayEquality)(errors, test_name, context, rhs, lhs, rhs_name, lhs_name); + } + + // Given a generator, makes sure that a generated value and a moved-from + // generated value are equal. 
+ template + struct ExpectMoveConstructOneGenerator + { + template + void operator()(const Fun& generator) const + { + const T object = generator(); + const T moved_object = absl::move(generator()); // Force no elision. + + (ExpectEquality)(errors, "Move construction", PrepareGivenContext(GivenDeclaration{"const _T object", generator.description}, GivenDeclaration{"const _T moved_object", std::string("std::move(") + generator.description + ")"}), object, moved_object, "object", "moved_object"); + } + + ConformanceErrors* errors; + }; + + // Given a generator, makes sure that a generated value and a copied-from + // generated value are equal. + template + struct ExpectCopyConstructOneGenerator + { + template + void operator()(const Fun& generator) const + { + const T object = generator(); + const T copied_object = static_cast(generator()); + + (ExpectEquality)(errors, "Copy construction", PrepareGivenContext(GivenDeclaration{"const _T object", generator.description}, GivenDeclaration{"const _T copied_object", std::string("static_cast(") + generator.description + ")"}), object, copied_object, "object", "copied_object"); + } + + ConformanceErrors* errors; + }; + + // Default-construct and do nothing before destruction. + // + // This is useful in exercising the codepath of default construction followed by + // destruction, but does not explicitly test anything. An example of where this + // might fail is a default destructor that default-initializes a scalar and a + // destructor reads the value of that member. Sanitizers can catch this as long + // as our test attempts to execute such a case. + template + struct ExpectDefaultConstructWithDestruct + { + void operator()() const + { + // Scoped so that destructor gets called before reporting success. + { + T object; + static_cast(object); + } + + errors->addTestSuccess("Default construction"); + } + + ConformanceErrors* errors; + }; + + // Check move-assign into a default-constructed object. 
+ template + struct ExpectDefaultConstructWithMoveAssign + { + template + void operator()(const Fun& generator) const + { + const T source_of_truth = generator(); + T object; + object = generator(); + + (ExpectEquality)(errors, "Move assignment", PrepareGivenContext(GivenDeclaration{"const _T object", generator.description}, GivenDeclaration{"_T object", ""}, GivenDeclaration{"object", generator.description}), object, source_of_truth, "std::as_const(object)", "source_of_truth"); + } + + ConformanceErrors* errors; + }; + + // Check copy-assign into a default-constructed object. + template + struct ExpectDefaultConstructWithCopyAssign + { + template + void operator()(const Fun& generator) const + { + const T source_of_truth = generator(); + T object; + object = static_cast(generator()); + + (ExpectEquality)(errors, "Copy assignment", PrepareGivenContext(GivenDeclaration{"const _T source_of_truth", generator.description}, GivenDeclaration{"_T object", ""}, GivenDeclaration{"object", std::string("static_cast(") + generator.description + ")"}), object, source_of_truth, "std::as_const(object)", "source_of_truth"); + } + + ConformanceErrors* errors; + }; + + // Perform a self move-assign. + template + struct ExpectSelfMoveAssign + { + template + void operator()(const Fun& generator) const + { + T object = generator(); + object = absl::move(object); + + // NOTE: Self move-assign results in a valid-but-unspecified state. + + (ExpectEquality)(errors, "Move assignment", PrepareGivenContext(GivenDeclaration{"_T object", generator.description}, GivenDeclaration{"object", "std::move(object)"}), object, object, "object", "object"); + } + + ConformanceErrors* errors; + }; + + // Perform a self copy-assign. 
+ template + struct ExpectSelfCopyAssign + { + template + void operator()(const Fun& generator) const + { + const T source_of_truth = generator(); + T object = generator(); + const T& const_object = object; + object = const_object; + + (ExpectEquality)(errors, "Copy assignment", PrepareGivenContext(GivenDeclaration{"const _T source_of_truth", generator.description}, GivenDeclaration{"_T object", generator.description}, GivenDeclaration{"object", "std::as_const(object)"}), const_object, source_of_truth, "std::as_const(object)", "source_of_truth"); + } + + ConformanceErrors* errors; + }; + + // Perform a self-swap. + template + struct ExpectSelfSwap + { + template + void operator()(const Fun& generator) const + { + const T source_of_truth = generator(); + T object = generator(); + + type_traits_internal::Swap(object, object); + + std::string preliminary_info = absl::StrCat( + PrepareGivenContext( + GivenDeclaration{"const _T source_of_truth", generator.description}, + GivenDeclaration{"_T object", generator.description} + ), + "After performing a self-swap:\n" + " using std::swap;\n" + " swap(object, object);\n" + "\n" + ); + + (ExpectEquality)(errors, "Swap", std::move(preliminary_info), object, source_of_truth, "std::as_const(object)", "source_of_truth"); + } + + ConformanceErrors* errors; + }; + + // Perform each of the single-generator checks when necessary operations are + // supported. + template + struct ExpectSelfComparison + { + template + void operator()(const Fun& generator) const + { + const T object = generator(); + (ExpectOneWayEquality)(errors, "Comparison", PrepareGivenContext(GivenDeclaration{"const _T object", generator.description}), object, object, "object", "object"); + } + + ConformanceErrors* errors; + }; + + // Perform each of the single-generator checks when necessary operations are + // supported. 
+ template + struct ExpectConsistency + { + template + void operator()(const Fun& generator) const + { + If::is_move_constructible>::Invoke( + ExpectMoveConstructOneGenerator{errors}, generator + ); + + If::is_copy_constructible>::Invoke( + ExpectCopyConstructOneGenerator{errors}, generator + ); + + If::is_default_constructible && + PropertiesOfT::is_move_assignable>:: + Invoke(ExpectDefaultConstructWithMoveAssign{errors}, generator); + + If::is_default_constructible && + PropertiesOfT::is_copy_assignable>:: + Invoke(ExpectDefaultConstructWithCopyAssign{errors}, generator); + + If::is_move_assignable>::Invoke( + ExpectSelfMoveAssign{errors}, generator + ); + + If::is_copy_assignable>::Invoke( + ExpectSelfCopyAssign{errors}, generator + ); + + If::is_swappable>::Invoke( + ExpectSelfSwap{errors}, generator + ); + } + + ConformanceErrors* errors; + }; + + // Check move-assign with two different values. + template + struct ExpectMoveAssign + { + template + void operator()(const Fun0& generator0, const Fun1& generator1) const + { + const T source_of_truth1 = generator1(); + T object = generator0(); + object = generator1(); + + (ExpectEquality)(errors, "Move assignment", PrepareGivenContext(GivenDeclaration{"const _T source_of_truth1", generator1.description}, GivenDeclaration{"_T object", generator0.description}, GivenDeclaration{"object", generator1.description}), object, source_of_truth1, "std::as_const(object)", "source_of_truth1"); + } + + ConformanceErrors* errors; + }; + + // Check copy-assign with two different values. 
+ template + struct ExpectCopyAssign + { + template + void operator()(const Fun0& generator0, const Fun1& generator1) const + { + const T source_of_truth1 = generator1(); + T object = generator0(); + object = static_cast(generator1()); + + (ExpectEquality)(errors, "Copy assignment", PrepareGivenContext(GivenDeclaration{"const _T source_of_truth1", generator1.description}, GivenDeclaration{"_T object", generator0.description}, GivenDeclaration{"object", std::string("static_cast(") + generator1.description + ")"}), object, source_of_truth1, "std::as_const(object)", "source_of_truth1"); + } + + ConformanceErrors* errors; + }; + + // Check swap with two different values. + template + struct ExpectSwap + { + template + void operator()(const Fun0& generator0, const Fun1& generator1) const + { + const T source_of_truth0 = generator0(); + const T source_of_truth1 = generator1(); + T object0 = generator0(); + T object1 = generator1(); + + type_traits_internal::Swap(object0, object1); + + const std::string context = + PrepareGivenContext( + GivenDeclaration{"const _T source_of_truth0", generator0.description}, + GivenDeclaration{"const _T source_of_truth1", generator1.description}, + GivenDeclaration{"_T object0", generator0.description}, + GivenDeclaration{"_T object1", generator1.description} + ) + + "After performing a swap:\n" + " using std::swap;\n" + " swap(object0, object1);\n" + "\n"; + + (ExpectEquality)(errors, "Swap", context, object0, source_of_truth1, "std::as_const(object0)", "source_of_truth1"); + (ExpectEquality)(errors, "Swap", context, object1, source_of_truth0, "std::as_const(object1)", "source_of_truth0"); + } + + ConformanceErrors* errors; + }; + + // Validate that `generator0` and `generator1` produce values that are equal. 
+ template + struct ExpectEquivalenceClassComparison + { + template + void operator()(const Fun0& generator0, const Fun1& generator1) const + { + const T object0 = generator0(); + const T object1 = generator1(); + + (ExpectEquality)(errors, "Comparison", PrepareGivenContext(GivenDeclaration{"const _T object0", generator0.description}, GivenDeclaration{"const _T object1", generator1.description}), object0, object1, "object0", "object1"); + } + + ConformanceErrors* errors; + }; + + // Validate that all objects in the same equivalence-class have the same value. + template + struct ExpectEquivalenceClassConsistency + { + template + void operator()(const Fun0& generator0, const Fun1& generator1) const + { + If::is_move_assignable>::Invoke( + ExpectMoveAssign{errors}, generator0, generator1 + ); + + If::is_copy_assignable>::Invoke( + ExpectCopyAssign{errors}, generator0, generator1 + ); + + If::is_swappable>::Invoke(ExpectSwap{errors}, generator0, generator1); + } + + ConformanceErrors* errors; + }; + + // Given a "lesser" object and a "greater" object, perform every combination of + // comparison operators supported for the type, expecting consistent results. 
+ template + void ExpectOrdered(ConformanceErrors* errors, absl::string_view context, const T& small, const T& big, absl::string_view small_name, absl::string_view big_name) + { + const absl::string_view test_name = "Comparison"; + + If::is_equality_comparable>::Invoke( + ExpectNotEq{errors}, test_name, context, small, big, small_name, big_name + ); + If::is_equality_comparable>::Invoke( + ExpectNotEq{errors}, test_name, context, big, small, big_name, small_name + ); + + If::is_inequality_comparable>::Invoke( + ExpectNe{errors}, test_name, context, small, big, small_name, big_name + ); + If::is_inequality_comparable>::Invoke( + ExpectNe{errors}, test_name, context, big, small, big_name, small_name + ); + + If::is_less_than_comparable>::Invoke( + ExpectLt{errors}, test_name, context, small, big, small_name, big_name + ); + If::is_less_than_comparable>::Invoke( + ExpectNotLt{errors}, test_name, context, big, small, big_name, small_name + ); + + If::is_less_equal_comparable>::Invoke( + ExpectLe{errors}, test_name, context, small, big, small_name, big_name + ); + If::is_less_equal_comparable>::Invoke( + ExpectNotLe{errors}, test_name, context, big, small, big_name, small_name + ); + + If::is_greater_equal_comparable>::Invoke( + ExpectNotGe{errors}, test_name, context, small, big, small_name, big_name + ); + If::is_greater_equal_comparable>::Invoke( + ExpectGe{errors}, test_name, context, big, small, big_name, small_name + ); + + If::is_greater_than_comparable>::Invoke( + ExpectNotGt{errors}, test_name, context, small, big, small_name, big_name + ); + If::is_greater_than_comparable>::Invoke( + ExpectGt{errors}, test_name, context, big, small, big_name, small_name + ); + } + + // For every two elements of an equivalence class, makes sure that those two + // elements compare equal, including checks with the same argument passed as + // both operands. 
+ template + struct ExpectEquivalenceClassComparisons + { + template + void operator()(EquivalenceClassType eq_class) const + { + (ForEachTupleElement)(ExpectSelfComparison{errors}, eq_class.generators); + + (ForEveryTwo)(ExpectEquivalenceClassComparison{errors}, eq_class.generators); + } + + ConformanceErrors* errors; + }; + + // For every element of an equivalence class, makes sure that the element is + // self-consistent (in other words, if any of move/copy/swap are defined, + // perform those operations and make such that results and operands still + // compare equal to known values whenever it is required for that operation. + template + struct ExpectEquivalenceClass + { + template + void operator()(EquivalenceClassType eq_class) const + { + (ForEachTupleElement)(ExpectConsistency{errors}, eq_class.generators); + + (ForEveryTwo)(ExpectEquivalenceClassConsistency{errors}, eq_class.generators); + } + + ConformanceErrors* errors; + }; + + // Validate that the passed-in argument is a generator of a greater value than + // the one produced by the "small_gen" datamember with respect to all of the + // comparison operators that Prof requires, with both argument orders to test. + template + struct ExpectBiggerGeneratorThanComparisons + { + template + void operator()(BigGenerator big_gen) const + { + const T small = small_gen(); + const T big = big_gen(); + + (ExpectOrdered)(errors, PrepareGivenContext(GivenDeclaration{"const _T small", small_gen.description}, GivenDeclaration{"const _T big", big_gen.description}), small, big, "small", "big"); + } + + SmallGenerator small_gen; + ConformanceErrors* errors; + }; + + // Perform all of the move, copy, and swap checks on the value generated by + // `small_gen` and the value generated by `big_gen`. 
+ template + struct ExpectBiggerGeneratorThan + { + template + void operator()(BigGenerator big_gen) const + { + If::is_move_assignable>::Invoke( + ExpectMoveAssign{errors}, small_gen, big_gen + ); + If::is_move_assignable>::Invoke( + ExpectMoveAssign{errors}, big_gen, small_gen + ); + + If::is_copy_assignable>::Invoke( + ExpectCopyAssign{errors}, small_gen, big_gen + ); + If::is_copy_assignable>::Invoke( + ExpectCopyAssign{errors}, big_gen, small_gen + ); + + If::is_swappable>::Invoke(ExpectSwap{errors}, small_gen, big_gen); + } + + SmallGenerator small_gen; + ConformanceErrors* errors; + }; + + // Validate that the result of a generator is greater than the results of all + // generators in an equivalence class with respect to comparisons. + template + struct ExpectBiggerGeneratorThanEqClassesComparisons + { + template + void operator()(BigEqClass big_eq_class) const + { + (ForEachTupleElement)( + ExpectBiggerGeneratorThanComparisons{small_gen, errors}, + big_eq_class.generators + ); + } + + SmallGenerator small_gen; + ConformanceErrors* errors; + }; + + // Validate that the non-comparison binary operations required by Prof are + // correct for the result of each generator of big_eq_class and a generator of + // the logically smaller value returned by small_gen. + template + struct ExpectBiggerGeneratorThanEqClasses + { + template + void operator()(BigEqClass big_eq_class) const + { + (ForEachTupleElement)( + ExpectBiggerGeneratorThan{small_gen, errors}, + big_eq_class.generators + ); + } + + SmallGenerator small_gen; + ConformanceErrors* errors; + }; + + // Validate that each equivalence class that is passed is logically less than + // the equivalence classes that comes later on in the argument list. + template + struct ExpectOrderedEquivalenceClassesComparisons + { + template + struct Impl + { + // Validate that the value produced by `small_gen` is less than all of the + // values generated by those of the logically larger equivalence classes. 
+ template + void operator()(SmallGenerator small_gen) const + { + (ForEachTupleElement)(ExpectBiggerGeneratorThanEqClassesComparisons{small_gen, errors}, big_eq_classes); + } + + std::tuple big_eq_classes; + ConformanceErrors* errors; + }; + + // When given no equivalence classes, no validation is necessary. + void operator()() const + { + } + + template + void operator()(SmallEqClass small_eq_class, BigEqClasses... big_eq_classes) const + { + // For each generator in the first equivalence class, make sure that it is + // less than each of those in the logically greater equivalence classes. + (ForEachTupleElement)( + Impl{std::make_tuple(absl::move(big_eq_classes)...), errors}, + small_eq_class.generators + ); + + // Recurse so that all equivalence class combinations are checked. + (*this)(absl::move(big_eq_classes)...); + } + + ConformanceErrors* errors; + }; + + // Validate that the non-comparison binary operations required by Prof are + // correct for the result of each generator of big_eq_classes and a generator of + // the logically smaller value returned by small_gen. + template + struct ExpectOrderedEquivalenceClasses + { + template + struct Impl + { + template + void operator()(SmallGenerator small_gen) const + { + (ForEachTupleElement)( + ExpectBiggerGeneratorThanEqClasses{small_gen, errors}, + big_eq_classes + ); + } + + std::tuple big_eq_classes; + ConformanceErrors* errors; + }; + + // Check that small_eq_class is logically consistent and also is logically + // less than all values in big_eq_classes. + template + void operator()(SmallEqClass small_eq_class, BigEqClasses... big_eq_classes) const + { + (ForEachTupleElement)( + Impl{std::make_tuple(absl::move(big_eq_classes)...), errors}, + small_eq_class.generators + ); + + (*this)(absl::move(big_eq_classes)...); + } + + // Terminating case of operator(). 
+ void operator()() const + { + } + + ConformanceErrors* errors; + }; + + // Validate that a type meets the syntactic requirements of std::hash if the + // range of profiles requires it. + template + struct ExpectHashable + { + void operator()() const + { + ExpectModelOfHashable(errors); + } + + ConformanceErrors* errors; + }; + + // Validate that the type `T` meets all of the requirements associated with + // `MinProf` and without going beyond the syntactic properties of `MaxProf`. + template + struct ExpectModels + { + void operator()(ConformanceErrors* errors) const + { + ExpectModelOfDefaultConstructible(errors); + ExpectModelOfMoveConstructible(errors); + ExpectModelOfCopyConstructible(errors); + ExpectModelOfMoveAssignable(errors); + ExpectModelOfCopyAssignable(errors); + ExpectModelOfDestructible(errors); + ExpectModelOfEqualityComparable(errors); + ExpectModelOfInequalityComparable(errors); + ExpectModelOfLessThanComparable(errors); + ExpectModelOfLessEqualComparable(errors); + ExpectModelOfGreaterEqualComparable(errors); + ExpectModelOfGreaterThanComparable(errors); + ExpectModelOfSwappable(errors); + + // Only check hashability on compilers that have a compliant default-hash. + If::Invoke( + ExpectHashable{errors} + ); + } + }; + + // A metafunction that yields a Profile matching the set of properties that are + // safe to be checked (lack-of-hashability is only checked on standard library + // implementations that are standards compliant in that they provide a std::hash + // primary template that is SFINAE-friendly) + template + struct MinimalCheckableProfile + { + using type = + MinimalProfiles, PropertiesOfT::is_hashable && poisoned_hash_fails_instantiation() ? 
CheckHashability::no : CheckHashability::yes>>>; + }; + + // An identity metafunction + template + struct Always + { + using type = T; + }; + + // Validate the T meets all of the necessary requirements of LogicalProf, with + // syntactic requirements defined by the profile range [MinProf, MaxProf]. + template + ConformanceErrors ExpectRegularityImpl( + OrderedEquivalenceClasses vals + ) + { + ConformanceErrors errors((NameOf())); + + If::Invoke( + ExpectModels(), &errors + ); + + using minimal_profile = typename absl::conditional_t< + constexpr_instantiation_when_unevaluated(), + Always, + MinimalCheckableProfile>::type; + + If::is_default_constructible>::Invoke( + ExpectDefaultConstructWithDestruct{&errors} + ); + + ////////////////////////////////////////////////////////////////////////////// + // Perform all comparison checks first, since later checks depend on their + // correctness. + // + // Check all of the comparisons for all values in the same equivalence + // class (equal with respect to comparison operators and hash the same). + (ForEachTupleElement)( + ExpectEquivalenceClassComparisons{&errors}, + vals.eq_classes + ); + + // Check all of the comparisons for each combination of values that are in + // different equivalence classes (not equal with respect to comparison + // operators). + absl::apply( + ExpectOrderedEquivalenceClassesComparisons{&errors}, + vals.eq_classes + ); + // + ////////////////////////////////////////////////////////////////////////////// + + // Perform remaining checks, relying on comparisons. + // TODO(calabrese) short circuit if any comparisons above failed. + (ForEachTupleElement)(ExpectEquivalenceClass{&errors}, vals.eq_classes); + + absl::apply(ExpectOrderedEquivalenceClasses{&errors}, vals.eq_classes); + + return errors; + } + + // A type that represents a range of profiles that are acceptable to be matched. + // + // `MinProf` is the minimum set of syntactic requirements that must be met. 
+ // + // `MaxProf` is the maximum set of syntactic requirements that must be met. + // This maximum is particularly useful for certain "strictness" checking. Some + // examples for when this is useful: + // + // * Making sure that a type is move-only (rather than simply movable) + // + // * Making sure that a member function is *not* noexcept in cases where it + // cannot be noexcept, such as if a dependent datamember has certain + // operations that are not noexcept. + // + // * Making sure that a type tightly matches a spec, such as the standard. + // + // `LogicalProf` is the Profile for which run-time testing is to take place. + // + // Note: The reason for `LogicalProf` is because it is often the case, when + // dealing with templates, that a declaration of a given operation is specified, + // but whose body would fail to instantiate. Examples include the + // copy-constructor of a standard container when the element-type is move-only, + // or the comparison operators of a standard container when the element-type + // does not have the necessary comparison operations defined. The `LogicalProf` + // parameter allows us to capture the intent of what should be tested at + // run-time, even in the cases where syntactically it might otherwise appear as + // though the type undergoing testing supports more than it actually does. + template + struct ProfileRange + { + using logical_profile = LogicalProf; + using min_profile = MinProf; + using max_profile = MaxProf; + }; + + // Similar to ProfileRange except that it creates a profile range that is + // coupled with a Domain and is used when testing that a type matches exactly + // the "minimum" requirements of LogicalProf. + template + struct StrictProfileRange + { + // We do not yet support extension. + static_assert( + std::is_same::value, + "Currently, the only valid StrictnessDomain is RegularityDomain." 
+ ); + using strictness_domain = StrictnessDomain; + using logical_profile = LogicalProf; + using min_profile = MinProf; + using max_profile = MaxProf; + }; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction that creates a StrictProfileRange from a Domain and either a + // Profile or ProfileRange. + template + struct MakeStrictProfileRange; + + template + struct MakeStrictProfileRange + { + using type = StrictProfileRange; + }; + + template + struct MakeStrictProfileRange> + { + using type = + StrictProfileRange; + }; + + template + using MakeStrictProfileRangeT = + typename MakeStrictProfileRange::type; + // + //////////////////////////////////////////////////////////////////////////////// + + // A profile in the RegularityDomain with the strongest possible requirements. + using MostStrictProfile = + CombineProfiles; + + // Forms a ProfileRange that treats the Profile as the bare minimum requirements + // of a type. + template + using LooseProfileRange = StrictProfileRange; + + template + using MakeLooseProfileRangeT = Prof; + + //////////////////////////////////////////////////////////////////////////////// + // + // The following classes implement the metafunction ProfileRangeOfT that + // takes either a Profile or ProfileRange and yields the ProfileRange to be + // used during testing. + // + template + struct ProfileRangeOfImpl; + + template + struct ProfileRangeOfImpl>> + { + using type = LooseProfileRange; + }; + + template + struct ProfileRangeOf : ProfileRangeOfImpl + { + }; + + template + struct ProfileRangeOf< + StrictProfileRange> + { + using type = + StrictProfileRange; + }; + + template + using ProfileRangeOfT = typename ProfileRangeOf::type; + // + //////////////////////////////////////////////////////////////////////////////// + + // Extract the logical profile of a range (what will be runtime tested). 
+ template + using LogicalProfileOfT = typename ProfileRangeOfT::logical_profile; + + // Extract the minimal syntactic profile of a range (error if not at least). + template + using MinProfileOfT = typename ProfileRangeOfT::min_profile; + + // Extract the maximum syntactic profile of a range (error if more than). + template + using MaxProfileOfT = typename ProfileRangeOfT::max_profile; + + //////////////////////////////////////////////////////////////////////////////// + // + template + struct IsProfileOrProfileRange : IsProfile::type + { + }; + + template + struct IsProfileOrProfileRange< + StrictProfileRange> : std::true_type + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // TODO(calabrese): Consider naming the functions in this class the same as + // the macros (defined later on) so that auto-complete leads to the correct name + // and so that a user cannot accidentally call a function rather than the macro + // form. + template + struct ExpectConformanceOf + { + // Add a value to be tested. Subsequent calls to this function on the same + // object must specify logically "larger" values with respect to the + // comparison operators of the type, if any. + // + // NOTE: This function should not be called directly. A stateless lambda is + // implicitly formed and passed when using the INITIALIZER macro at the bottom + // of this file. + template>, T>::value>** = nullptr> + ABSL_MUST_USE_RESULT ExpectConformanceOf> + initializer(GeneratorType fun) && + { + return { + {std::tuple_cat(absl::move(ordered_vals.eq_classes), std::make_tuple((EquivalenceClass)(absl::move(fun))))}, + std::move(expected_failed_tests)}; + } + + template...>::value>** = nullptr> + ABSL_MUST_USE_RESULT ExpectConformanceOf + due_to(TestNames&&... 
test_names) && + { + (InsertEach)(&expected_failed_tests, absl::AsciiStrToLower(absl::string_view(test_names))...); + + return {absl::move(ordered_vals), std::move(expected_failed_tests)}; + } + + template...>::value>** = nullptr> + ABSL_MUST_USE_RESULT ExpectConformanceOf + due_to(TestNames&&... test_names) && + { + // TODO(calabrese) Instead have DUE_TO only exist via a CRTP base. + // This would produce better errors messages than the static_assert. + static_assert(!ExpectSuccess, "DUE_TO cannot be called when conformance is expected -- did " + "you mean to use ASSERT_NONCONFORMANCE_OF?"); + } + + // Add a value to be tested. Subsequent calls to this function on the same + // object must specify logically "larger" values with respect to the + // comparison operators of the type, if any. + // + // NOTE: This function should not be called directly. A stateful lambda is + // implicitly formed and passed when using the INITIALIZER macro at the bottom + // of this file. + template>, T>::value>** = nullptr> + ABSL_MUST_USE_RESULT ExpectConformanceOf> + dont_class_directly_stateful_initializer(GeneratorType fun) && + { + return { + {std::tuple_cat(absl::move(ordered_vals.eq_classes), std::make_tuple((EquivalenceClass)(absl::move(fun))))}, + std::move(expected_failed_tests)}; + } + + // Add a set of value to be tested, where each value is equal with respect to + // the comparison operators and std::hash specialization, if defined. + template< + class... Funs, + absl::void_t>, + T>::value>...>** = nullptr> + ABSL_MUST_USE_RESULT ExpectConformanceOf> + equivalence_class(GeneratorType... funs) && + { + return {{std::tuple_cat(absl::move(ordered_vals.eq_classes), std::make_tuple((EquivalenceClass)(absl::move(funs)...)))}, std::move(expected_failed_tests)}; + } + + // Execute the tests for the captured set of values, strictly matching a range + // of expected profiles in a given domain. 
+ template< + class ProfRange, + absl::enable_if_t::value>** = nullptr> + ABSL_MUST_USE_RESULT ::testing::AssertionResult with_strict_profile( + ProfRange /*profile*/ + ) + { + ConformanceErrors test_result = + (ExpectRegularityImpl< + T, + LogicalProfileOfT, + MinProfileOfT, + MaxProfileOfT>)(absl::move(ordered_vals)); + + return ExpectSuccess ? test_result.assertionResult() : test_result.expectFailedTests(expected_failed_tests); + } + + // Execute the tests for the captured set of values, loosely matching a range + // of expected profiles (loose in that an interface is allowed to be more + // refined that a profile suggests, such as a type having a noexcept copy + // constructor when all that is required is that the copy constructor exists). + template::value>** = nullptr> + ABSL_MUST_USE_RESULT ::testing::AssertionResult with_loose_profile( + Prof /*profile*/ + ) + { + ConformanceErrors test_result = + (ExpectRegularityImpl< + T, + Prof, + Prof, + CombineProfiles>)(absl:: + move(ordered_vals)); + + return ExpectSuccess ? test_result.assertionResult() : test_result.expectFailedTests(expected_failed_tests); + } + + OrderedEquivalenceClasses ordered_vals; + std::set expected_failed_tests; + }; + + template + using ExpectConformanceOfType = ExpectConformanceOf; + + template + using ExpectNonconformanceOfType = + ExpectConformanceOf; + + struct EquivalenceClassMaker + { + // TODO(calabrese) Constrain to callable + template + static GeneratorType initializer(GeneratorType fun) + { + return fun; + } + }; + +// A top-level macro that begins the builder pattern. +// +// The argument here takes the datatype to be tested. +#define ABSL_INTERNAL_ASSERT_CONFORMANCE_OF(...) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if ABSL_INTERNAL_LPAREN \ + const ::testing::AssertionResult gtest_ar = \ + ABSL_INTERNAL_LPAREN ::absl::types_internal::ExpectConformanceOfType< \ + __VA_ARGS__>() + +// Akin to ASSERT_CONFORMANCE_OF except that it expects failure and tries to +// match text. 
+#define ABSL_INTERNAL_ASSERT_NONCONFORMANCE_OF(...) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if ABSL_INTERNAL_LPAREN \ + const ::testing::AssertionResult gtest_ar = \ + ABSL_INTERNAL_LPAREN ::absl::types_internal::ExpectNonconformanceOfType< \ + __VA_ARGS__>() + +//////////////////////////////////////////////////////////////////////////////// +// NOTE: The following macros look like they are recursive, but are not (macros +// cannot recurse). These actually refer to member functions of the same name. +// This is done intentionally so that a user cannot accidentally invoke a +// member function of the conformance-testing suite without going through the +// macro. +//////////////////////////////////////////////////////////////////////////////// + +// Specify expected test failures as comma-separated strings. +#define DUE_TO(...) due_to(__VA_ARGS__) + +// Specify a value to be tested. +// +// Note: Internally, this takes an expression and turns it into the return value +// of lambda that captures no data. The expression is stringized during +// preprocessing so that it can be used in error reports. +#define INITIALIZER(...) \ + initializer(::absl::types_internal::Generator( \ + [] { return __VA_ARGS__; }, ABSL_INTERNAL_STRINGIZE(__VA_ARGS__) \ + )) + +// Specify a value to be tested. +// +// Note: Internally, this takes an expression and turns it into the return value +// of lambda that captures data by reference. The expression is stringized +// during preprocessing so that it can be used in error reports. +#define STATEFUL_INITIALIZER(...) \ + stateful_initializer(::absl::types_internal::Generator( \ + [&] { return __VA_ARGS__; }, ABSL_INTERNAL_STRINGIZE(__VA_ARGS__) \ + )) + +// Used in the builder-pattern. 
+// +// Takes a series of INITIALIZER and/or STATEFUL_INITIALIZER invocations and +// forwards them along to be tested, grouping them such that the testing suite +// knows that they are supposed to represent the same logical value (the values +// compare the same, hash the same, etc.). +#define EQUIVALENCE_CLASS(...) \ + equivalence_class(ABSL_INTERNAL_TRANSFORM_ARGS( \ + ABSL_INTERNAL_PREPEND_EQ_MAKER, __VA_ARGS__ \ + )) + +// An invocation of this or WITH_STRICT_PROFILE must end the builder-pattern. +// It takes a Profile as its argument. +// +// This executes the tests and allows types that are "more referined" than the +// profile specifies, but not less. For instance, if the Profile specifies +// noexcept copy-constructiblity, the test will fail if the copy-constructor is +// not noexcept, however, it will succeed if the copy constructor is trivial. +// +// This is useful for testing that a type meets some minimum set of +// requirements. +#define WITH_LOOSE_PROFILE(...) \ + with_loose_profile( \ + ::absl::types_internal::MakeLooseProfileRangeT<__VA_ARGS__>() \ + ) \ + ABSL_INTERNAL_RPAREN ABSL_INTERNAL_RPAREN; \ + else GTEST_FATAL_FAILURE_(gtest_ar.failure_message()) // NOLINT + +// An invocation of this or WITH_STRICT_PROFILE must end the builder-pattern. +// It takes a Domain and a Profile as its arguments. +// +// This executes the tests and disallows types that differ at all from the +// properties of the Profile. For instance, if the Profile specifies noexcept +// copy-constructiblity, the test will fail if the copy constructor is trivial. +// +// This is useful for testing that a type does not do anything more than a +// specification requires, such as to minimize things like Hyrum's Law, or more +// commonly, to prevent a type from being "accidentally" copy-constructible in +// a way that may produce incorrect results, simply because the user forget to +// delete that operation. +#define WITH_STRICT_PROFILE(...) 
\ + with_strict_profile( \ + ::absl::types_internal::MakeStrictProfileRangeT<__VA_ARGS__>() \ + ) \ + ABSL_INTERNAL_RPAREN ABSL_INTERNAL_RPAREN; \ + else GTEST_FATAL_FAILURE_(gtest_ar.failure_message()) // NOLINT + +// Internal macro that is used in the internals of the EDSL when forming +// equivalence classes. +#define ABSL_INTERNAL_PREPEND_EQ_MAKER(arg) \ + ::absl::types_internal::EquivalenceClassMaker().arg + + } // namespace types_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/conformance_testing_helpers.h b/CAPI/cpp/grpc/include/absl/types/internal/conformance_testing_helpers.h new file mode 100644 index 00000000..41c25ab6 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/conformance_testing_helpers.h @@ -0,0 +1,434 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_HELPERS_H_ +#define ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_HELPERS_H_ + +// Checks to determine whether or not we can use abi::__cxa_demangle +#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(OS_ANDROID) +#define ABSL_INTERNAL_OS_ANDROID +#endif + +// We support certain compilers only. See demangle.h for details. 
+#if defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__)) +#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 0 +#elif (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \ + !defined(__mips__) +#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 1 +#elif defined(__clang__) && !defined(_MSC_VER) +#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 1 +#else +#define ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE 0 +#endif + +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/utility/utility.h" + +#if ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE +#include + +#include +#endif + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace types_internal + { + + // Return a readable name for type T. + template + absl::string_view NameOfImpl() + { +// TODO(calabrese) Investigate using debugging:internal_demangle as a fallback. +#if ABSL_TYPES_INTERNAL_HAS_CXA_DEMANGLE + int status = 0; + char* demangled_name = nullptr; + + demangled_name = + abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); + + if (status == 0 && demangled_name != nullptr) + { + return demangled_name; + } + else + { + return typeid(T).name(); + } +#else + return typeid(T).name(); +#endif + // NOTE: We intentionally leak demangled_name so that it remains valid + // throughout the remainder of the program. + } + + // Given a type, returns as nice of a type name as we can produce (demangled). + // + // Note: This currently strips cv-qualifiers and references, but that is okay + // because we only use this internally with unqualified object types. 
+ template + std::string NameOf() + { + static const absl::string_view result = NameOfImpl(); + return std::string(result); + } + + //////////////////////////////////////////////////////////////////////////////// + // + // Metafunction to check if a type is callable with no explicit arguments + template + struct IsNullaryCallableImpl : std::false_type + { + }; + + template + struct IsNullaryCallableImpl< + Fun, + absl::void_t()())>> : std::true_type + { + using result_type = decltype(std::declval()()); + + template + using for_type = std::is_same; + + using void_if_true = void; + }; + + template + struct IsNullaryCallable : IsNullaryCallableImpl + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // A type that contains a function object that returns an instance of a type + // that is undergoing conformance testing. This function is required to always + // return the same value upon invocation. + template + struct GeneratorType; + + // A type that contains a tuple of GeneratorType where each Fun has the + // same return type. The result of each of the different generators should all + // be equal values, though the underlying object representation may differ (such + // as if one returns 0.0 and another return -0.0, or if one returns an empty + // vector and another returns an empty vector with a different capacity. 
+ template + struct EquivalenceClassType; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction to check if a type is a specialization of EquivalenceClassType + template + struct IsEquivalenceClass : std::false_type + { + }; + + template<> + struct IsEquivalenceClass> : std::true_type + { + using self = IsEquivalenceClass; + + // A metafunction to check if this EquivalenceClassType is a valid + // EquivalenceClassType for a type `ValueType` that is undergoing testing + template + using for_type = std::true_type; + }; + + template + struct IsEquivalenceClass> : std::true_type + { + using self = IsEquivalenceClass; + + // The type undergoing conformance testing that this EquivalenceClass + // corresponds to + using result_type = typename IsNullaryCallable::result_type; + + // A metafunction to check if this EquivalenceClassType is a valid + // EquivalenceClassType for a type `ValueType` that is undergoing testing + template + using for_type = std::is_same; + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // A type that contains an ordered series of EquivalenceClassTypes, where the + // the function object of each underlying GeneratorType has the same return type + // + // These equivalence classes are required to be in a logical ascending order + // that is consistent with comparison operators that are defined for the return + // type of each GeneratorType, if any. + template + struct OrderedEquivalenceClasses; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction to determine the return type of the function object contained + // in a GeneratorType specialization. 
+ template + struct ResultOfGenerator + { + }; + + template + struct ResultOfGenerator> + { + using type = decltype(std::declval()()); + }; + + template + using ResultOfGeneratorT = typename ResultOfGenerator>::type; + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction that yields true iff each of Funs is a GeneratorType + // specialization and they all contain functions with the same return type + template + struct AreGeneratorsWithTheSameReturnTypeImpl : std::false_type + { + }; + + template<> + struct AreGeneratorsWithTheSameReturnTypeImpl : std::true_type + { + }; + + template + struct AreGeneratorsWithTheSameReturnTypeImpl< + typename std::enable_if, + ResultOfGeneratorT>...>::value>::type, + Head, + Tail...> : std::true_type + { + }; + + template + struct AreGeneratorsWithTheSameReturnType : AreGeneratorsWithTheSameReturnTypeImpl::type + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction that yields true iff each of Funs is an EquivalenceClassType + // specialization and they all contain GeneratorType specializations that have + // the same return type + template + struct AreEquivalenceClassesOfTheSameType + { + static_assert(sizeof...(EqClasses) != sizeof...(EqClasses), ""); + }; + + template<> + struct AreEquivalenceClassesOfTheSameType<> : std::true_type + { + using self = AreEquivalenceClassesOfTheSameType; + + // Metafunction to check that a type is the same as all of the equivalence + // classes, if any. + // Note: In this specialization there are no equivalence classes, so the + // value type is always compatible. 
+ template + using for_type = std::true_type; + }; + + template + struct AreEquivalenceClassesOfTheSameType> : std::true_type + { + using self = AreEquivalenceClassesOfTheSameType; + + // Metafunction to check that a type is the same as all of the equivalence + // classes, if any. + template + using for_type = typename IsEquivalenceClass< + EquivalenceClassType>::template for_type; + }; + + template + struct AreEquivalenceClassesOfTheSameType< + EquivalenceClassType<>, + EquivalenceClassType<>, + TailEqClasses...> : AreEquivalenceClassesOfTheSameType::self + { + }; + + template + struct AreEquivalenceClassesOfTheSameType< + EquivalenceClassType<>, + EquivalenceClassType, + TailEqClasses...> : AreEquivalenceClassesOfTheSameType, TailEqClasses...>::self + { + }; + + template + struct AreEquivalenceClassesOfTheSameType< + EquivalenceClassType, + EquivalenceClassType<>, + TailEqClasses...> : AreEquivalenceClassesOfTheSameType, TailEqClasses...>::self + { + }; + + template + struct AreEquivalenceClassesOfTheSameType< + EquivalenceClassType, + EquivalenceClassType, + TailEqClasses...> : absl::conditional_t::template for_type::result_type>::value, AreEquivalenceClassesOfTheSameType, TailEqClasses...>, std::false_type> + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // Execute a function for each passed-in parameter. + template + void ForEachParameter(const Fun& fun, const Cases&... cases) + { + const std::initializer_list results = { + (static_cast(fun(cases)), true)...}; + + (void)results; + } + + // Execute a function on each passed-in parameter (using a bound function). + template + struct ForEachParameterFun + { + template + void operator()(const T&... cases) const + { + (ForEachParameter)(fun, cases...); + } + + Fun fun; + }; + + // Execute a function on each element of a tuple. 
+ template + void ForEachTupleElement(const Fun& fun, const Tup& tup) + { + absl::apply(ForEachParameterFun{fun}, tup); + } + + //////////////////////////////////////////////////////////////////////////////// + // + // Execute a function for each combination of two elements of a tuple, including + // combinations of an element with itself. + template + struct ForEveryTwoImpl + { + template + struct WithBoundLhs + { + template + void operator()(const Rhs& rhs) const + { + fun(lhs, rhs); + } + + Fun fun; + Lhs lhs; + }; + + template + void operator()(const Lhs& lhs) const + { + (ForEachTupleElement)(WithBoundLhs{fun, lhs}, args); + } + + Fun fun; + std::tuple args; + }; + + template + void ForEveryTwo(const Fun& fun, std::tuple args) + { + (ForEachTupleElement)(ForEveryTwoImpl{fun, args}, args); + } + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + // + // Insert all values into an associative container + template + void InsertEach(Container* cont) + { + } + + template + void InsertEach(Container* cont, H&& head, T&&... tail) + { + cont->insert(head); + (InsertEach)(cont, tail...); + } + // + //////////////////////////////////////////////////////////////////////////////// + // A template with a nested "Invoke" static-member-function that executes a + // passed-in Callable when `Condition` is true, otherwise it ignores the + // Callable. This is useful for executing a function object with a condition + // that corresponds to whether or not the Callable can be safely instantiated. + // It has some overlapping uses with C++17 `if constexpr`. + template + struct If; + + template<> + struct If + { + template + static void Invoke(const Fun& /*fun*/, P&&... /*args*/) + { + } + }; + + template<> + struct If + { + template + static void Invoke(const Fun& fun, P&&... args) + { + // TODO(calabrese) Use std::invoke equivalent instead of function-call. 
+ fun(absl::forward

(args)...); + } + }; + +// +// ABSL_INTERNAL_STRINGIZE(...) +// +// This variadic macro transforms its arguments into a c-string literal after +// expansion. +// +// Example: +// +// ABSL_INTERNAL_STRINGIZE(std::array) +// +// Results in: +// +// "std::array" +#define ABSL_INTERNAL_STRINGIZE(...) ABSL_INTERNAL_STRINGIZE_IMPL((__VA_ARGS__)) +#define ABSL_INTERNAL_STRINGIZE_IMPL(arg) ABSL_INTERNAL_STRINGIZE_IMPL2 arg +#define ABSL_INTERNAL_STRINGIZE_IMPL2(...) #__VA_ARGS__ + + } // namespace types_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TYPES_INTERNAL_CONFORMANCE_TESTING_HELPERS_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/optional.h b/CAPI/cpp/grpc/include/absl/types/internal/optional.h new file mode 100644 index 00000000..4d9e352d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/optional.h @@ -0,0 +1,406 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef ABSL_TYPES_INTERNAL_OPTIONAL_H_ +#define ABSL_TYPES_INTERNAL_OPTIONAL_H_ + +#include +#include +#include +#include + +#include "absl/base/internal/inline_variable.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Forward declaration + template + class optional; + + namespace optional_internal + { + + // This tag type is used as a constructor parameter type for `nullopt_t`. 
+ struct init_t + { + explicit init_t() = default; + }; + + struct empty_struct + { + }; + + // This class stores the data in optional. + // It is specialized based on whether T is trivially destructible. + // This is the specialization for non trivially destructible type. + template::value> + class optional_data_dtor_base + { + struct dummy_type + { + static_assert(sizeof(T) % sizeof(empty_struct) == 0, ""); + // Use an array to avoid GCC 6 placement-new warning. + empty_struct data[sizeof(T) / sizeof(empty_struct)]; + }; + + protected: + // Whether there is data or not. + bool engaged_; + // Data storage + union + { + T data_; + dummy_type dummy_; + }; + + void destruct() noexcept + { + if (engaged_) + { + // `data_` must be initialized if `engaged_` is true. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + data_.~T(); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif + engaged_ = false; + } + } + + // dummy_ must be initialized for constexpr constructor. + constexpr optional_data_dtor_base() noexcept : + engaged_(false), + dummy_{{}} + { + } + + template + constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args) : + engaged_(true), + data_(absl::forward(args)...) + { + } + + ~optional_data_dtor_base() + { + destruct(); + } + }; + + // Specialization for trivially destructible type. + template + class optional_data_dtor_base + { + struct dummy_type + { + static_assert(sizeof(T) % sizeof(empty_struct) == 0, ""); + // Use array to avoid GCC 6 placement-new warning. + empty_struct data[sizeof(T) / sizeof(empty_struct)]; + }; + + protected: + // Whether there is data or not. + bool engaged_; + // Data storage + union + { + T data_; + dummy_type dummy_; + }; + void destruct() noexcept + { + engaged_ = false; + } + + // dummy_ must be initialized for constexpr constructor. 
+ constexpr optional_data_dtor_base() noexcept : + engaged_(false), + dummy_{{}} + { + } + + template + constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args) : + engaged_(true), + data_(absl::forward(args)...) + { + } + }; + + template + class optional_data_base : public optional_data_dtor_base + { + protected: + using base = optional_data_dtor_base; + using base::base; + + template + void construct(Args&&... args) + { + // Use dummy_'s address to work around casting cv-qualified T* to void*. + ::new (static_cast(&this->dummy_)) T(std::forward(args)...); + this->engaged_ = true; + } + + template + void assign(U&& u) + { + if (this->engaged_) + { + this->data_ = std::forward(u); + } + else + { + construct(std::forward(u)); + } + } + }; + + // TODO(absl-team): Add another class using + // std::is_trivially_move_constructible trait when available to match + // http://cplusplus.github.io/LWG/lwg-defects.html#2900, for types that + // have trivial move but nontrivial copy. + // Also, we should be checking is_trivially_copyable here, which is not + // supported now, so we use is_trivially_* traits instead. 
+ template::value&& absl::is_trivially_copy_assignable::type>::value&& std::is_trivially_destructible::value> + class optional_data; + + // Trivially copyable types + template + class optional_data : public optional_data_base + { + protected: + using optional_data_base::optional_data_base; + }; + + template + class optional_data : public optional_data_base + { + protected: + using optional_data_base::optional_data_base; + + optional_data() = default; + + optional_data(const optional_data& rhs) : + optional_data_base() + { + if (rhs.engaged_) + { + this->construct(rhs.data_); + } + } + + optional_data(optional_data&& rhs) noexcept( + absl::default_allocator_is_nothrow::value || + std::is_nothrow_move_constructible::value + ) : + optional_data_base() + { + if (rhs.engaged_) + { + this->construct(std::move(rhs.data_)); + } + } + + optional_data& operator=(const optional_data& rhs) + { + if (rhs.engaged_) + { + this->assign(rhs.data_); + } + else + { + this->destruct(); + } + return *this; + } + + optional_data& operator=(optional_data&& rhs) noexcept( + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_constructible::value + ) + { + if (rhs.engaged_) + { + this->assign(std::move(rhs.data_)); + } + else + { + this->destruct(); + } + return *this; + } + }; + + // Ordered by level of restriction, from low to high. + // Copyable implies movable. + enum class copy_traits + { + copyable = 0, + movable = 1, + non_movable = 2 + }; + + // Base class for enabling/disabling copy/move constructor. 
+ template + class optional_ctor_base; + + template<> + class optional_ctor_base + { + public: + constexpr optional_ctor_base() = default; + optional_ctor_base(const optional_ctor_base&) = default; + optional_ctor_base(optional_ctor_base&&) = default; + optional_ctor_base& operator=(const optional_ctor_base&) = default; + optional_ctor_base& operator=(optional_ctor_base&&) = default; + }; + + template<> + class optional_ctor_base + { + public: + constexpr optional_ctor_base() = default; + optional_ctor_base(const optional_ctor_base&) = delete; + optional_ctor_base(optional_ctor_base&&) = default; + optional_ctor_base& operator=(const optional_ctor_base&) = default; + optional_ctor_base& operator=(optional_ctor_base&&) = default; + }; + + template<> + class optional_ctor_base + { + public: + constexpr optional_ctor_base() = default; + optional_ctor_base(const optional_ctor_base&) = delete; + optional_ctor_base(optional_ctor_base&&) = delete; + optional_ctor_base& operator=(const optional_ctor_base&) = default; + optional_ctor_base& operator=(optional_ctor_base&&) = default; + }; + + // Base class for enabling/disabling copy/move assignment. 
+ template + class optional_assign_base; + + template<> + class optional_assign_base + { + public: + constexpr optional_assign_base() = default; + optional_assign_base(const optional_assign_base&) = default; + optional_assign_base(optional_assign_base&&) = default; + optional_assign_base& operator=(const optional_assign_base&) = default; + optional_assign_base& operator=(optional_assign_base&&) = default; + }; + + template<> + class optional_assign_base + { + public: + constexpr optional_assign_base() = default; + optional_assign_base(const optional_assign_base&) = default; + optional_assign_base(optional_assign_base&&) = default; + optional_assign_base& operator=(const optional_assign_base&) = delete; + optional_assign_base& operator=(optional_assign_base&&) = default; + }; + + template<> + class optional_assign_base + { + public: + constexpr optional_assign_base() = default; + optional_assign_base(const optional_assign_base&) = default; + optional_assign_base(optional_assign_base&&) = default; + optional_assign_base& operator=(const optional_assign_base&) = delete; + optional_assign_base& operator=(optional_assign_base&&) = delete; + }; + + template + struct ctor_copy_traits + { + static constexpr copy_traits traits = + std::is_copy_constructible::value ? copy_traits::copyable : std::is_move_constructible::value ? copy_traits::movable : + copy_traits::non_movable; + }; + + template + struct assign_copy_traits + { + static constexpr copy_traits traits = + absl::is_copy_assignable::value && std::is_copy_constructible::value ? copy_traits::copyable : absl::is_move_assignable::value && std::is_move_constructible::value ? copy_traits::movable : + copy_traits::non_movable; + }; + + // Whether T is constructible or convertible from optional. 
+ template + struct is_constructible_convertible_from_optional : std::integral_constant&>::value || std::is_constructible&&>::value || std::is_constructible&>::value || std::is_constructible&&>::value || std::is_convertible&, T>::value || std::is_convertible&&, T>::value || std::is_convertible&, T>::value || std::is_convertible&&, T>::value> + { + }; + + // Whether T is constructible or convertible or assignable from optional. + template + struct is_constructible_convertible_assignable_from_optional : std::integral_constant::value || std::is_assignable&>::value || std::is_assignable&&>::value || std::is_assignable&>::value || std::is_assignable&&>::value> + { + }; + + // Helper function used by [optional.relops], [optional.comp_with_t], + // for checking whether an expression is convertible to bool. + bool convertible_to_bool(bool); + + // Base class for std::hash>: + // If std::hash> is enabled, it provides operator() to + // compute the hash; Otherwise, it is disabled. + // Reference N4659 23.14.15 [unord.hash]. 
+ template + struct optional_hash_base + { + optional_hash_base() = delete; + optional_hash_base(const optional_hash_base&) = delete; + optional_hash_base(optional_hash_base&&) = delete; + optional_hash_base& operator=(const optional_hash_base&) = delete; + optional_hash_base& operator=(optional_hash_base&&) = delete; + }; + + template + struct optional_hash_base>()(std::declval>()))> + { + using argument_type = absl::optional; + using result_type = size_t; + size_t operator()(const absl::optional& opt) const + { + absl::type_traits_internal::AssertHashEnabled>(); + if (opt) + { + return std::hash>()(*opt); + } + else + { + return static_cast(0x297814aaad196e6dULL); + } + } + }; + + } // namespace optional_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TYPES_INTERNAL_OPTIONAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/parentheses.h b/CAPI/cpp/grpc/include/absl/types/internal/parentheses.h new file mode 100644 index 00000000..5aebee8f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/parentheses.h @@ -0,0 +1,34 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// parentheses.h +// ----------------------------------------------------------------------------- +// +// This file contains macros that expand to a left parenthesis and a right +// parenthesis. 
These are in their own file and are generated from macros +// because otherwise clang-format gets confused and clang-format off directives +// do not help. +// +// The parentheses macros are used when wanting to require a rescan before +// expansion of parenthesized text appearing after a function-style macro name. + +#ifndef ABSL_TYPES_INTERNAL_PARENTHESES_H_ +#define ABSL_TYPES_INTERNAL_PARENTHESES_H_ + +#define ABSL_INTERNAL_LPAREN ( + +#define ABSL_INTERNAL_RPAREN ) + +#endif // ABSL_TYPES_INTERNAL_PARENTHESES_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/span.h b/CAPI/cpp/grpc/include/absl/types/internal/span.h new file mode 100644 index 00000000..f64fe39a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/span.h @@ -0,0 +1,151 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef ABSL_TYPES_INTERNAL_SPAN_H_ +#define ABSL_TYPES_INTERNAL_SPAN_H_ + +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + template + class Span; + + namespace span_internal + { + // Wrappers for access to container data pointers. + template + constexpr auto GetDataImpl(C& c, char) noexcept // NOLINT(runtime/references) + -> decltype(c.data()) + { + return c.data(); + } + + // Before C++17, std::string::data returns a const char* in all cases. 
+ inline char* GetDataImpl(std::string& s, // NOLINT(runtime/references) + int) noexcept + { + return &s[0]; + } + + template + constexpr auto GetData(C& c) noexcept // NOLINT(runtime/references) + -> decltype(GetDataImpl(c, 0)) + { + return GetDataImpl(c, 0); + } + + // Detection idioms for size() and data(). + template + using HasSize = + std::is_integral().size())>>; + + // We want to enable conversion from vector to Span but + // disable conversion from vector to Span. Here we use + // the fact that U** is convertible to Q* const* if and only if Q is the same + // type or a more cv-qualified version of U. We also decay the result type of + // data() to avoid problems with classes which have a member function data() + // which returns a reference. + template + using HasData = + std::is_convertible()))>*, T* const*>; + + // Extracts value type from a Container + template + struct ElementType + { + using type = typename absl::remove_reference_t::value_type; + }; + + template + struct ElementType + { + using type = T; + }; + + template + using ElementT = typename ElementType::type; + + template + using EnableIfMutable = + typename std::enable_if::value, int>::type; + + template class SpanT, typename T> + bool EqualImpl(SpanT a, SpanT b) + { + static_assert(std::is_const::value, ""); + return std::equal(a.begin(), a.end(), b.begin(), b.end()); + } + + template class SpanT, typename T> + bool LessThanImpl(SpanT a, SpanT b) + { + // We can't use value_type since that is remove_cv_t, so we go the long way + // around. + static_assert(std::is_const::value, ""); + return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + using EnableIfConvertibleTo = + typename std::enable_if::value>::type; + + // IsView is true for types where the return type of .data() is the same for + // mutable and const instances. This isn't foolproof, but it's only used to + // enable a compiler warning. 
+ template + struct IsView + { + static constexpr bool value = false; + }; + + template + struct IsView< + T, + absl::void_t()))>, + absl::void_t()))>> + { + private: + using Container = std::remove_const_t; + using ConstData = + decltype(span_internal::GetData(std::declval())); + using MutData = decltype(span_internal::GetData(std::declval())); + + public: + static constexpr bool value = std::is_same::value; + }; + + // These enablers result in 'int' so they can be used as typenames or defaults + // in template parameters lists. + template + using EnableIfIsView = std::enable_if_t::value, int>; + + template + using EnableIfNotIsView = std::enable_if_t::value, int>; + + } // namespace span_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TYPES_INTERNAL_SPAN_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/transform_args.h b/CAPI/cpp/grpc/include/absl/types/internal/transform_args.h new file mode 100644 index 00000000..706bbdde --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/transform_args.h @@ -0,0 +1,215 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// transform_args.h +// ----------------------------------------------------------------------------- +// +// This file contains a higher-order macro that "transforms" each element of a +// a variadic argument by a provided secondary macro. + +#ifndef ABSL_TYPES_INTERNAL_TRANSFORM_ARGS_H_ +#define ABSL_TYPES_INTERNAL_TRANSFORM_ARGS_H_ + +// +// ABSL_INTERNAL_CAT(a, b) +// +// This macro takes two arguments and concatenates them together via ## after +// expansion. +// +// Example: +// +// ABSL_INTERNAL_CAT(foo_, bar) +// +// Results in: +// +// foo_bar +#define ABSL_INTERNAL_CAT(a, b) ABSL_INTERNAL_CAT_IMPL(a, b) +#define ABSL_INTERNAL_CAT_IMPL(a, b) a##b + +// +// ABSL_INTERNAL_TRANSFORM_ARGS(m, ...) +// +// This macro takes another macro as an argument followed by a trailing series +// of additional parameters (up to 32 additional arguments). It invokes the +// passed-in macro once for each of the additional arguments, with the +// expansions separated by commas. +// +// Example: +// +// ABSL_INTERNAL_TRANSFORM_ARGS(MY_MACRO, a, b, c) +// +// Results in: +// +// MY_MACRO(a), MY_MACRO(b), MY_MACRO(c) +// +// TODO(calabrese) Handle no arguments as a special case. +#define ABSL_INTERNAL_TRANSFORM_ARGS(m, ...) 
\ + ABSL_INTERNAL_CAT(ABSL_INTERNAL_TRANSFORM_ARGS, ABSL_INTERNAL_NUM_ARGS(__VA_ARGS__)) \ + (m, __VA_ARGS__) + +#define ABSL_INTERNAL_TRANSFORM_ARGS1(m, a0) m(a0) + +#define ABSL_INTERNAL_TRANSFORM_ARGS2(m, a0, a1) m(a0), m(a1) + +#define ABSL_INTERNAL_TRANSFORM_ARGS3(m, a0, a1, a2) m(a0), m(a1), m(a2) + +#define ABSL_INTERNAL_TRANSFORM_ARGS4(m, a0, a1, a2, a3) \ + m(a0), m(a1), m(a2), m(a3) + +#define ABSL_INTERNAL_TRANSFORM_ARGS5(m, a0, a1, a2, a3, a4) \ + m(a0), m(a1), m(a2), m(a3), m(a4) + +#define ABSL_INTERNAL_TRANSFORM_ARGS6(m, a0, a1, a2, a3, a4, a5) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5) + +#define ABSL_INTERNAL_TRANSFORM_ARGS7(m, a0, a1, a2, a3, a4, a5, a6) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6) + +#define ABSL_INTERNAL_TRANSFORM_ARGS8(m, a0, a1, a2, a3, a4, a5, a6, a7) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7) + +#define ABSL_INTERNAL_TRANSFORM_ARGS9(m, a0, a1, a2, a3, a4, a5, a6, a7, a8) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8) + +#define ABSL_INTERNAL_TRANSFORM_ARGS10(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9) + +#define ABSL_INTERNAL_TRANSFORM_ARGS11(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), m(a10) + +#define ABSL_INTERNAL_TRANSFORM_ARGS12(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11) + +#define ABSL_INTERNAL_TRANSFORM_ARGS13(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12) + +#define ABSL_INTERNAL_TRANSFORM_ARGS14(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13) + +#define ABSL_INTERNAL_TRANSFORM_ARGS15(m, a0, 
a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14) + +#define ABSL_INTERNAL_TRANSFORM_ARGS16(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15) + +#define ABSL_INTERNAL_TRANSFORM_ARGS17(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16) + +#define ABSL_INTERNAL_TRANSFORM_ARGS18(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17) + +#define ABSL_INTERNAL_TRANSFORM_ARGS19(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18) + +#define ABSL_INTERNAL_TRANSFORM_ARGS20(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19) + +#define ABSL_INTERNAL_TRANSFORM_ARGS21(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20) + +#define ABSL_INTERNAL_TRANSFORM_ARGS22(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21) \ + m(a0), m(a1), m(a2), m(a3), 
m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21) + +#define ABSL_INTERNAL_TRANSFORM_ARGS23(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22) + +#define ABSL_INTERNAL_TRANSFORM_ARGS24(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23) + +#define ABSL_INTERNAL_TRANSFORM_ARGS25(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24) + +#define ABSL_INTERNAL_TRANSFORM_ARGS26( \ + m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25 \ +) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25) + +#define ABSL_INTERNAL_TRANSFORM_ARGS27( \ + m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26 \ +) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26) + +#define 
ABSL_INTERNAL_TRANSFORM_ARGS28( \ + m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27 \ +) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27) + +#define ABSL_INTERNAL_TRANSFORM_ARGS29( \ + m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28 \ +) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \ + m(a28) + +#define ABSL_INTERNAL_TRANSFORM_ARGS30( \ + m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29 \ +) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \ + m(a28), m(a29) + +#define ABSL_INTERNAL_TRANSFORM_ARGS31( \ + m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30 \ +) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \ + m(a28), m(a29), m(a30) + +#define ABSL_INTERNAL_TRANSFORM_ARGS32(m, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31) \ + m(a0), m(a1), m(a2), m(a3), m(a4), m(a5), m(a6), m(a7), m(a8), m(a9), \ + m(a10), 
m(a11), m(a12), m(a13), m(a14), m(a15), m(a16), m(a17), m(a18), \ + m(a19), m(a20), m(a21), m(a22), m(a23), m(a24), m(a25), m(a26), m(a27), \ + m(a28), m(a29), m(a30), m(a31) + +#define ABSL_INTERNAL_NUM_ARGS_IMPL(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, result, ...) \ + result + +#define ABSL_INTERNAL_FORCE_EXPANSION(...) __VA_ARGS__ + +#define ABSL_INTERNAL_NUM_ARGS(...) \ + ABSL_INTERNAL_FORCE_EXPANSION(ABSL_INTERNAL_NUM_ARGS_IMPL( \ + __VA_ARGS__, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, \ + )) + +#endif // ABSL_TYPES_INTERNAL_TRANSFORM_ARGS_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/internal/variant.h b/CAPI/cpp/grpc/include/absl/types/internal/variant.h new file mode 100644 index 00000000..74d492ae --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/internal/variant.h @@ -0,0 +1,1842 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Implementation details of absl/types/variant.h, pulled into a +// separate file to avoid cluttering the top of the API header with +// implementation details. 
+ +#ifndef ABSL_TYPES_INTERNAL_VARIANT_H_ +#define ABSL_TYPES_INTERNAL_VARIANT_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/identity.h" +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/invoke.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/meta/type_traits.h" +#include "absl/types/bad_variant_access.h" +#include "absl/utility/utility.h" + +#if !defined(ABSL_USES_STD_VARIANT) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + template + class variant; + + ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, static_cast(-1)); + + template + struct variant_size; + + template + struct variant_alternative; + + namespace variant_internal + { + + // NOTE: See specializations below for details. + template + struct VariantAlternativeSfinae + { + }; + + // Requires: I < variant_size_v. + // + // Value: The Ith type of Types... + template + struct VariantAlternativeSfinae> : VariantAlternativeSfinae> + { + }; + + // Value: T0 + template + struct VariantAlternativeSfinae<0, variant> + { + using type = T0; + }; + + template + using VariantAlternativeSfinaeT = typename VariantAlternativeSfinae::type; + + // NOTE: Requires T to be a reference type. 
+ template + struct GiveQualsTo; + + template + struct GiveQualsTo + { + using type = U&; + }; + + template + struct GiveQualsTo + { + using type = U&&; + }; + + template + struct GiveQualsTo + { + using type = const U&; + }; + + template + struct GiveQualsTo + { + using type = const U&&; + }; + + template + struct GiveQualsTo + { + using type = volatile U&; + }; + + template + struct GiveQualsTo + { + using type = volatile U&&; + }; + + template + struct GiveQualsTo + { + using type = volatile const U&; + }; + + template + struct GiveQualsTo + { + using type = volatile const U&&; + }; + + template + using GiveQualsToT = typename GiveQualsTo::type; + + // Convenience alias, since size_t integral_constant is used a lot in this file. + template + using SizeT = std::integral_constant; + + using NPos = SizeT; + + template + struct IndexOfConstructedType + { + }; + + template + struct VariantAccessResultImpl; + + template class Variantemplate, class... T> + struct VariantAccessResultImpl&> + { + using type = typename absl::variant_alternative>::type&; + }; + + template class Variantemplate, class... T> + struct VariantAccessResultImpl&> + { + using type = + const typename absl::variant_alternative>::type&; + }; + + template class Variantemplate, class... T> + struct VariantAccessResultImpl&&> + { + using type = typename absl::variant_alternative>::type&&; + }; + + template class Variantemplate, class... T> + struct VariantAccessResultImpl&&> + { + using type = + const typename absl::variant_alternative>::type&&; + }; + + template + using VariantAccessResult = + typename VariantAccessResultImpl::type; + + // NOTE: This is used instead of std::array to reduce instantiation overhead. 
+ template + struct SimpleArray + { + static_assert(Size != 0, ""); + T value[Size]; + }; + + template + struct AccessedType + { + using type = T; + }; + + template + using AccessedTypeT = typename AccessedType::type; + + template + struct AccessedType> + { + using type = AccessedTypeT; + }; + + template + constexpr T AccessSimpleArray(const T& value) + { + return value; + } + + template + constexpr AccessedTypeT AccessSimpleArray(const SimpleArray& table, std::size_t head_index, SizeT... tail_indices) + { + return AccessSimpleArray(table.value[head_index], tail_indices...); + } + + // Note: Intentionally is an alias. + template + using AlwaysZero = SizeT<0>; + + template + struct VisitIndicesResultImpl + { + using type = absl::result_of_t...)>; + }; + + template + using VisitIndicesResultT = typename VisitIndicesResultImpl::type; + + template + struct MakeVisitationMatrix; + + template + constexpr ReturnType call_with_indices(FunctionObject&& function) + { + static_assert( + std::is_same()(SizeT()...))>::value, + "Not all visitation overloads have the same return type." 
+ ); + return absl::forward(function)(SizeT()...); + } + + template + struct MakeVisitationMatrix, index_sequence> + { + using ResultType = ReturnType (*)(FunctionObject&&); + static constexpr ResultType Run() + { + return &call_with_indices; + } + }; + + template + struct AppendToIndexSequence; + + template + using AppendToIndexSequenceT = typename AppendToIndexSequence::type; + + template + struct AppendToIndexSequence, J> + { + using type = index_sequence; + }; + + template + struct MakeVisitationMatrixImpl; + + template + struct MakeVisitationMatrixImpl, BoundIndices> + { + using ResultType = SimpleArray< + typename MakeVisitationMatrix>::ResultType, + sizeof...(CurrIndices)>; + + static constexpr ResultType Run() + { + return {{MakeVisitationMatrix< + ReturnType, + FunctionObject, + EndIndices, + AppendToIndexSequenceT>::Run()...}}; + } + }; + + template + struct MakeVisitationMatrix, index_sequence> : MakeVisitationMatrixImpl, absl::make_index_sequence, index_sequence> + { + }; + + struct UnreachableSwitchCase + { + template + [[noreturn]] static VisitIndicesResultT Run( + Op&& /*ignored*/ + ) + { +#if ABSL_HAVE_BUILTIN(__builtin_unreachable) || \ + (defined(__GNUC__) && !defined(__clang__)) + __builtin_unreachable(); +#elif defined(_MSC_VER) + __assume(false); +#else + // Try to use assert of false being identified as an unreachable intrinsic. + // NOTE: We use assert directly to increase chances of exploiting an assume + // intrinsic. + assert(false); // NOLINT + + // Hack to silence potential no return warning -- cause an infinite loop. + return Run(absl::forward(op)); +#endif // Checks for __builtin_unreachable + } + }; + + template + struct ReachableSwitchCase + { + static VisitIndicesResultT Run(Op&& op) + { + return absl::base_internal::invoke(absl::forward(op), SizeT()); + } + }; + + // The number 33 is just a guess at a reasonable maximum to our switch. It is + // not based on any analysis. 
The reason it is a power of 2 plus 1 instead of a + // power of 2 is because the number was picked to correspond to a power of 2 + // amount of "normal" alternatives, plus one for the possibility of the user + // providing "monostate" in addition to the more natural alternatives. + ABSL_INTERNAL_INLINE_CONSTEXPR(std::size_t, MaxUnrolledVisitCases, 33); + + // Note: The default-definition is for unreachable cases. + template + struct PickCaseImpl + { + template + using Apply = UnreachableSwitchCase; + }; + + template<> + struct PickCaseImpl + { + template + using Apply = ReachableSwitchCase; + }; + + // Note: This form of dance with template aliases is to make sure that we + // instantiate a number of templates proportional to the number of variant + // alternatives rather than a number of templates proportional to our + // maximum unrolled amount of visitation cases (aliases are effectively + // "free" whereas other template instantiations are costly). + template + using PickCase = typename PickCaseImpl<(I < EndIndex)>::template Apply; + + template + [[noreturn]] ReturnType TypedThrowBadVariantAccess() + { + absl::variant_internal::ThrowBadVariantAccess(); + } + + // Given N variant sizes, determine the number of cases there would need to be + // in a single switch-statement that would cover every possibility in the + // corresponding N-ary visit operation. + template + struct NumCasesOfSwitch; + + template + struct NumCasesOfSwitch + { + static constexpr std::size_t value = + (HeadNumAlternatives + 1) * + NumCasesOfSwitch::value; + }; + + template<> + struct NumCasesOfSwitch<> + { + static constexpr std::size_t value = 1; + }; + + // A switch statement optimizes better than the table of function pointers. 
+ template + struct VisitIndicesSwitch + { + static_assert(EndIndex <= MaxUnrolledVisitCases, "Maximum unrolled switch size exceeded."); + + template + static VisitIndicesResultT Run(Op&& op, std::size_t i) + { + switch (i) + { + case 0: + return PickCase::Run(absl::forward(op)); + case 1: + return PickCase::Run(absl::forward(op)); + case 2: + return PickCase::Run(absl::forward(op)); + case 3: + return PickCase::Run(absl::forward(op)); + case 4: + return PickCase::Run(absl::forward(op)); + case 5: + return PickCase::Run(absl::forward(op)); + case 6: + return PickCase::Run(absl::forward(op)); + case 7: + return PickCase::Run(absl::forward(op)); + case 8: + return PickCase::Run(absl::forward(op)); + case 9: + return PickCase::Run(absl::forward(op)); + case 10: + return PickCase::Run(absl::forward(op)); + case 11: + return PickCase::Run(absl::forward(op)); + case 12: + return PickCase::Run(absl::forward(op)); + case 13: + return PickCase::Run(absl::forward(op)); + case 14: + return PickCase::Run(absl::forward(op)); + case 15: + return PickCase::Run(absl::forward(op)); + case 16: + return PickCase::Run(absl::forward(op)); + case 17: + return PickCase::Run(absl::forward(op)); + case 18: + return PickCase::Run(absl::forward(op)); + case 19: + return PickCase::Run(absl::forward(op)); + case 20: + return PickCase::Run(absl::forward(op)); + case 21: + return PickCase::Run(absl::forward(op)); + case 22: + return PickCase::Run(absl::forward(op)); + case 23: + return PickCase::Run(absl::forward(op)); + case 24: + return PickCase::Run(absl::forward(op)); + case 25: + return PickCase::Run(absl::forward(op)); + case 26: + return PickCase::Run(absl::forward(op)); + case 27: + return PickCase::Run(absl::forward(op)); + case 28: + return PickCase::Run(absl::forward(op)); + case 29: + return PickCase::Run(absl::forward(op)); + case 30: + return PickCase::Run(absl::forward(op)); + case 31: + return PickCase::Run(absl::forward(op)); + case 32: + return PickCase::Run(absl::forward(op)); 
+ default: + ABSL_ASSERT(i == variant_npos); + return absl::base_internal::invoke(absl::forward(op), NPos()); + } + } + }; + + template + struct VisitIndicesFallback + { + template + static VisitIndicesResultT Run(Op&& op, SizeT... indices) + { + return AccessSimpleArray( + MakeVisitationMatrix, Op, index_sequence<(EndIndices + 1)...>, index_sequence<>>::Run(), + (indices + 1)... + )(absl::forward(op)); + } + }; + + // Take an N-dimensional series of indices and convert them into a single index + // without loss of information. The purpose of this is to be able to convert an + // N-ary visit operation into a single switch statement. + template + struct FlattenIndices; + + template + struct FlattenIndices + { + template + static constexpr std::size_t Run(std::size_t head, SizeType... tail) + { + return head + HeadSize * FlattenIndices::Run(tail...); + } + }; + + template<> + struct FlattenIndices<> + { + static constexpr std::size_t Run() + { + return 0; + } + }; + + // Take a single "flattened" index (flattened by FlattenIndices) and determine + // the value of the index of one of the logically represented dimensions. + template + struct UnflattenIndex + { + static constexpr std::size_t value = + UnflattenIndex::value; + }; + + template + struct UnflattenIndex + { + static constexpr std::size_t value = (I % HeadSize); + }; + + // The backend for converting an N-ary visit operation into a unary visit. + template + struct VisitIndicesVariadicImpl; + + template + struct VisitIndicesVariadicImpl, EndIndices...> + { + // A type that can take an N-ary function object and converts it to a unary + // function object that takes a single, flattened index, and "unflattens" it + // into its individual dimensions when forwarding to the wrapped object. + template + struct FlattenedOp + { + template + VisitIndicesResultT operator()( + SizeT /*index*/ + ) && + { + return base_internal::invoke( + absl::forward(op), + SizeT::value - std::size_t{1}>()... 
+ ); + } + + Op&& op; + }; + + template + static VisitIndicesResultT Run(Op&& op, SizeType... i) + { + return VisitIndicesSwitch::value>::Run( + FlattenedOp{absl::forward(op)}, + FlattenIndices<(EndIndices + std::size_t{1})...>::Run( + (i + std::size_t{1})... + ) + ); + } + }; + + template + struct VisitIndicesVariadic : VisitIndicesVariadicImpl, EndIndices...> + { + }; + + // This implementation will flatten N-ary visit operations into a single switch + // statement when the number of cases would be less than our maximum specified + // switch-statement size. + // TODO(calabrese) + // Based on benchmarks, determine whether the function table approach actually + // does optimize better than a chain of switch statements and possibly update + // the implementation accordingly. Also consider increasing the maximum switch + // size. + template + struct VisitIndices : absl::conditional_t<(NumCasesOfSwitch::value <= MaxUnrolledVisitCases), VisitIndicesVariadic, VisitIndicesFallback> + { + }; + + template + struct VisitIndices : absl::conditional_t<(EndIndex <= MaxUnrolledVisitCases), VisitIndicesSwitch, VisitIndicesFallback> + { + }; + +// Suppress bogus warning on MSVC: MSVC complains that the `reinterpret_cast` +// below is returning the address of a temporary or local object. +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4172) +#endif // _MSC_VER + + // TODO(calabrese) std::launder + // TODO(calabrese) constexpr + // NOTE: DO NOT REMOVE the `inline` keyword as it is necessary to work around a + // MSVC bug. See https://github.com/abseil/abseil-cpp/issues/129 for details. + template + inline VariantAccessResult AccessUnion(Self&& self, SizeT /*i*/) + { + return reinterpret_cast>(self); + } + +#ifdef _MSC_VER +#pragma warning(pop) +#endif // _MSC_VER + + template + void DeducedDestroy(T& self) + { // NOLINT + self.~T(); + } + + // NOTE: This type exists as a single entity for variant and its bases to + // befriend. 
It contains helper functionality that manipulates the state of the + // variant, such as the implementation of things like assignment and emplace + // operations. + struct VariantCoreAccess + { + template + static typename VariantType::Variant& Derived(VariantType& self) + { // NOLINT + return static_cast(self); + } + + template + static const typename VariantType::Variant& Derived( + const VariantType& self + ) + { // NOLINT + return static_cast(self); + } + + template + static void Destroy(VariantType& self) + { // NOLINT + Derived(self).destroy(); + self.index_ = absl::variant_npos; + } + + template + static void SetIndex(Variant& self, std::size_t i) + { // NOLINT + self.index_ = i; + } + + template + static void InitFrom(Variant& self, Variant&& other) + { // NOLINT + VisitIndices::value>::Run( + InitFromVisitor{&self, std::forward(other)}, + other.index() + ); + self.index_ = other.index(); + } + + // Access a variant alternative, assuming the index is correct. + template + static VariantAccessResult Access(Variant&& self) + { + // This cast instead of invocation of AccessUnion with an rvalue is a + // workaround for msvc. Without this there is a runtime failure when dealing + // with rvalues. + // TODO(calabrese) Reduce test case and find a simpler workaround. + return static_cast>( + variant_internal::AccessUnion(self.state_, SizeT()) + ); + } + + // Access a variant alternative, throwing if the index is incorrect. + template + static VariantAccessResult CheckedAccess(Variant&& self) + { + if (ABSL_PREDICT_FALSE(self.index_ != I)) + { + TypedThrowBadVariantAccess>(); + } + + return Access(absl::forward(self)); + } + + // The implementation of the move-assignment operation for a variant. 
+ template + struct MoveAssignVisitor + { + using DerivedType = typename VType::Variant; + template + void operator()(SizeT /*new_i*/) const + { + if (left->index_ == NewIndex) + { + Access(*left) = std::move(Access(*right)); + } + else + { + Derived(*left).template emplace( + std::move(Access(*right)) + ); + } + } + + void operator()(SizeT /*new_i*/) const + { + Destroy(*left); + } + + VType* left; + VType* right; + }; + + template + static MoveAssignVisitor MakeMoveAssignVisitor(VType* left, VType* other) + { + return {left, other}; + } + + // The implementation of the assignment operation for a variant. + template + struct CopyAssignVisitor + { + using DerivedType = typename VType::Variant; + template + void operator()(SizeT /*new_i*/) const + { + using New = + typename absl::variant_alternative::type; + + if (left->index_ == NewIndex) + { + Access(*left) = Access(*right); + } + else if (std::is_nothrow_copy_constructible::value || !std::is_nothrow_move_constructible::value) + { + Derived(*left).template emplace(Access(*right)); + } + else + { + Derived(*left) = DerivedType(Derived(*right)); + } + } + + void operator()(SizeT /*new_i*/) const + { + Destroy(*left); + } + + VType* left; + const VType* right; + }; + + template + static CopyAssignVisitor MakeCopyAssignVisitor(VType* left, const VType& other) + { + return {left, &other}; + } + + // The implementation of conversion-assignment operations for variant. 
+ template + struct ConversionAssignVisitor + { + using NewIndex = + variant_internal::IndexOfConstructedType; + + void operator()(SizeT /*old_i*/ + ) const + { + Access(*left) = absl::forward(other); + } + + template + void operator()(SizeT /*old_i*/ + ) const + { + using New = + typename absl::variant_alternative::type; + if (std::is_nothrow_constructible::value || + !std::is_nothrow_move_constructible::value) + { + left->template emplace( + absl::forward(other) + ); + } + else + { + // the standard says "equivalent to + // operator=(variant(std::forward(t)))", but we use `emplace` here + // because the variant's move assignment operator could be deleted. + left->template emplace( + New(absl::forward(other)) + ); + } + } + + Left* left; + QualifiedNew&& other; + }; + + template + static ConversionAssignVisitor + MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) + { + return {left, absl::forward(qual)}; + } + + // Backend for operations for `emplace()` which destructs `*self` then + // construct a new alternative with `Args...`. + template + static typename absl::variant_alternative::type& Replace( + Self* self, Args&&... args + ) + { + Destroy(*self); + using New = typename absl::variant_alternative::type; + New* const result = ::new (static_cast(&self->state_)) + New(absl::forward(args)...); + self->index_ = NewIndex; + return *result; + } + + template + struct InitFromVisitor + { + template + void operator()(SizeT /*new_i*/) const + { + using Alternative = + typename variant_alternative::type; + ::new (static_cast(&left->state_)) Alternative( + Access(std::forward(right)) + ); + } + + void operator()(SizeT /*new_i*/) const + { + // This space intentionally left blank. 
+ } + LeftVariant* left; + QualifiedRightVariant&& right; + }; + }; + + template + struct IndexOfImpl; + + template + struct IndexOfImpl + { + using IndexFromEnd = SizeT<0>; + using MatchedIndexFromEnd = IndexFromEnd; + using MultipleMatches = std::false_type; + }; + + template + struct IndexOfImpl : IndexOfImpl + { + using IndexFromEnd = + SizeT::IndexFromEnd::value + 1>; + }; + + template + struct IndexOfImpl : IndexOfImpl + { + using IndexFromEnd = + SizeT::IndexFromEnd::value + 1>; + using MatchedIndexFromEnd = IndexFromEnd; + using MultipleMatches = std::integral_constant< + bool, + IndexOfImpl::MatchedIndexFromEnd::value != 0>; + }; + + template + struct IndexOfMeta + { + using Results = IndexOfImpl; + static_assert(!Results::MultipleMatches::value, "Attempted to access a variant by specifying a type that " + "matches more than one alternative."); + static_assert(Results::MatchedIndexFromEnd::value != 0, "Attempted to access a variant by specifying a type that does " + "not match any alternative."); + using type = SizeT; + }; + + template + using IndexOf = typename IndexOfMeta::type; + + template + struct UnambiguousIndexOfImpl; + + // Terminating case encountered once we've checked all of the alternatives + template + struct UnambiguousIndexOfImpl, T, CurrIndex> : SizeT + { + }; + + // Case where T is not Head + template + struct UnambiguousIndexOfImpl, T, CurrIndex> : UnambiguousIndexOfImpl, T, CurrIndex + 1>::type + { + }; + + // Case where T is Head + template + struct UnambiguousIndexOfImpl, Head, CurrIndex> : SizeT, Head, 0>::value == sizeof...(Tail) ? 
CurrIndex : CurrIndex + sizeof...(Tail) + 1> + { + }; + + template + struct UnambiguousIndexOf; + + struct NoMatch + { + struct type + { + }; + }; + + template + struct UnambiguousIndexOf, T> : std::conditional, T, 0>::value != sizeof...(Alts), UnambiguousIndexOfImpl, T, 0>, NoMatch>::type::type + { + }; + + template + using UnambiguousTypeOfImpl = T; + + template + using UnambiguousTypeOfT = + UnambiguousTypeOfImpl::value>; + + template + class VariantStateBase; + + // This is an implementation of the "imaginary function" that is described in + // [variant.ctor] + // It is used in order to determine which alternative to construct during + // initialization from some type T. + template + struct ImaginaryFun; + + template + struct ImaginaryFun, I> + { + static void Run() = delete; + }; + + template + struct ImaginaryFun, I> : ImaginaryFun, I + 1> + { + using ImaginaryFun, I + 1>::Run; + + // NOTE: const& and && are used instead of by-value due to lack of guaranteed + // move elision of C++17. This may have other minor differences, but tests + // pass. + static SizeT Run(const H&, SizeT); + static SizeT Run(H&&, SizeT); + }; + + // The following metafunctions are used in constructor and assignment + // constraints. 
+ template + struct IsNeitherSelfNorInPlace : std::true_type + { + }; + + template + struct IsNeitherSelfNorInPlace : std::false_type + { + }; + + template + struct IsNeitherSelfNorInPlace> : std::false_type + { + }; + + template + struct IsNeitherSelfNorInPlace> : std::false_type + { + }; + + template + struct IndexOfConstructedType< + Variant, + T, + void_t::Run(std::declval(), {}))>> : decltype(ImaginaryFun::Run(std::declval(), {})) + { + }; + + template + struct ContainsVariantNPos : absl::negation, + std::integer_sequence>> + { + }; + + template + using RawVisitResult = + absl::result_of_t...)>; + + // NOTE: The spec requires that all return-paths yield the same type and is not + // SFINAE-friendly, so we can deduce the return type by examining the first + // result. If it's not callable, then we get an error, but are compliant and + // fast to compile. + // TODO(calabrese) Possibly rewrite in a way that yields better compile errors + // at the cost of longer compile-times. + template + struct VisitResultImpl + { + using type = + absl::result_of_t...)>; + }; + + // Done in two steps intentionally so that we don't cause substitution to fail. + template + using VisitResult = typename VisitResultImpl::type; + + template + struct PerformVisitation + { + using ReturnType = VisitResult; + + template + constexpr ReturnType operator()(SizeT... indices) const + { + return Run(typename ContainsVariantNPos::type{}, absl::index_sequence_for(), indices...); + } + + template + constexpr ReturnType Run(std::false_type /*has_valueless*/, index_sequence, SizeT...) const + { + static_assert( + std::is_same...)>>::value, + "All visitation overloads must have the same return type." + ); + return absl::base_internal::invoke( + absl::forward(op), + VariantCoreAccess::Access( + absl::forward(std::get(variant_tup)) + )... + ); + } + + template + [[noreturn]] ReturnType Run(std::true_type /*has_valueless*/, index_sequence, SizeT...) 
const + { + absl::variant_internal::ThrowBadVariantAccess(); + } + + // TODO(calabrese) Avoid using a tuple, which causes lots of instantiations + // Attempts using lambda variadic captures fail on current GCC. + std::tuple variant_tup; + Op&& op; + }; + + template + union Union; + + // We want to allow for variant<> to be trivial. For that, we need the default + // constructor to be trivial, which means we can't define it ourselves. + // Instead, we use a non-default constructor that takes NoopConstructorTag + // that doesn't affect the triviality of the types. + struct NoopConstructorTag + { + }; + + template + struct EmplaceTag + { + }; + + template<> + union Union<> + { + constexpr explicit Union(NoopConstructorTag) noexcept + { + } + }; + +// Suppress bogus warning on MSVC: MSVC complains that Union has a defined +// deleted destructor from the `std::is_destructible` check below. +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4624) +#endif // _MSC_VER + + template + union Union + { + using TailUnion = Union; + + explicit constexpr Union(NoopConstructorTag /*tag*/) noexcept + : + tail(NoopConstructorTag()) + { + } + + template + explicit constexpr Union(EmplaceTag<0>, P&&... args) : + head(absl::forward

(args)...) + { + } + + template + explicit constexpr Union(EmplaceTag, P&&... args) : + tail(EmplaceTag{}, absl::forward

(args)...) + { + } + + Head head; + TailUnion tail; + }; + +#ifdef _MSC_VER +#pragma warning(pop) +#endif // _MSC_VER + + // TODO(calabrese) Just contain a Union in this union (certain configs fail). + template + union DestructibleUnionImpl; + + template<> + union DestructibleUnionImpl<> + { + constexpr explicit DestructibleUnionImpl(NoopConstructorTag) noexcept + { + } + }; + + template + union DestructibleUnionImpl + { + using TailUnion = DestructibleUnionImpl; + + explicit constexpr DestructibleUnionImpl(NoopConstructorTag /*tag*/) noexcept + : + tail(NoopConstructorTag()) + { + } + + template + explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args) : + head(absl::forward

(args)...) + { + } + + template + explicit constexpr DestructibleUnionImpl(EmplaceTag, P&&... args) : + tail(EmplaceTag{}, absl::forward

(args)...) + { + } + + ~DestructibleUnionImpl() + { + } + + Head head; + TailUnion tail; + }; + + // This union type is destructible even if one or more T are not trivially + // destructible. In the case that all T are trivially destructible, then so is + // this resultant type. + template + using DestructibleUnion = + absl::conditional_t>::value, Union, DestructibleUnionImpl>; + + // Deepest base, containing the actual union and the discriminator + template + class VariantStateBase + { + protected: + using Variant = variant; + + template::value, LazyH>> + constexpr VariantStateBase() noexcept( + std::is_nothrow_default_constructible::value + ) : + state_(EmplaceTag<0>()), + index_(0) + { + } + + template + explicit constexpr VariantStateBase(EmplaceTag tag, P&&... args) : + state_(tag, absl::forward

(args)...), + index_(I) + { + } + + explicit constexpr VariantStateBase(NoopConstructorTag) : + state_(NoopConstructorTag()), + index_(variant_npos) + { + } + + void destroy() + { + } // Does nothing (shadowed in child if non-trivial) + + DestructibleUnion state_; + std::size_t index_; + }; + + using absl::internal::identity; + + // OverloadSet::Overload() is a unary function which is overloaded to + // take any of the element types of the variant, by reference-to-const. + // The return type of the overload on T is identity, so that you + // can statically determine which overload was called. + // + // Overload() is not defined, so it can only be called in unevaluated + // contexts. + template + struct OverloadSet; + + template + struct OverloadSet : OverloadSet + { + using Base = OverloadSet; + static identity Overload(const T&); + using Base::Overload; + }; + + template<> + struct OverloadSet<> + { + // For any case not handled above. + static void Overload(...); + }; + + template + using LessThanResult = decltype(std::declval() < std::declval()); + + template + using GreaterThanResult = decltype(std::declval() > std::declval()); + + template + using LessThanOrEqualResult = decltype(std::declval() <= std::declval()); + + template + using GreaterThanOrEqualResult = + decltype(std::declval() >= std::declval()); + + template + using EqualResult = decltype(std::declval() == std::declval()); + + template + using NotEqualResult = decltype(std::declval() != std::declval()); + + using type_traits_internal::is_detected_convertible; + + template + using RequireAllHaveEqualT = absl::enable_if_t< + absl::conjunction...>::value, + bool>; + + template + using RequireAllHaveNotEqualT = + absl::enable_if_t...>::value, bool>; + + template + using RequireAllHaveLessThanT = + absl::enable_if_t...>::value, bool>; + + template + using RequireAllHaveLessThanOrEqualT = + absl::enable_if_t...>::value, bool>; + + template + using RequireAllHaveGreaterThanOrEqualT = + 
absl::enable_if_t...>::value, bool>; + + template + using RequireAllHaveGreaterThanT = + absl::enable_if_t...>::value, bool>; + + // Helper template containing implementations details of variant that can't go + // in the private section. For convenience, this takes the variant type as a + // single template parameter. + template + struct VariantHelper; + + template + struct VariantHelper> + { + // Type metafunction which returns the element type selected if + // OverloadSet::Overload() is well-formed when called with argument type U. + template + using BestMatch = decltype(variant_internal::OverloadSet::Overload( + std::declval() + )); + + // Type metafunction which returns true if OverloadSet::Overload() is + // well-formed when called with argument type U. + // CanAccept can't be just an alias because there is a MSVC bug on parameter + // pack expansion involving decltype. + template + struct CanAccept : std::integral_constant>::value> + { + }; + + // Type metafunction which returns true if Other is an instantiation of + // variant, and variants's converting constructor from Other will be + // well-formed. We will use this to remove constructors that would be + // ill-formed from the overload set. + template + struct CanConvertFrom; + + template + struct CanConvertFrom> : public absl::conjunction...> + { + }; + }; + + // A type with nontrivial copy ctor and trivial move ctor. + struct TrivialMoveOnly + { + TrivialMoveOnly(TrivialMoveOnly&&) = default; + }; + + // Trait class to detect whether a type is trivially move constructible. + // A union's defaulted copy/move constructor is deleted if any variant member's + // copy/move constructor is nontrivial. + template + struct IsTriviallyMoveConstructible : std::is_move_constructible> + { + }; + + // To guarantee triviality of all special-member functions that can be trivial, + // we use a chain of conditional bases for each one. 
+ // The order of inheritance of bases from child to base are logically: + // + // variant + // VariantCopyAssignBase + // VariantMoveAssignBase + // VariantCopyBase + // VariantMoveBase + // VariantStateBaseDestructor + // VariantStateBase + // + // Note that there is a separate branch at each base that is dependent on + // whether or not that corresponding special-member-function can be trivial in + // the resultant variant type. + + template + class VariantStateBaseDestructorNontrivial; + + template + class VariantMoveBaseNontrivial; + + template + class VariantCopyBaseNontrivial; + + template + class VariantMoveAssignBaseNontrivial; + + template + class VariantCopyAssignBaseNontrivial; + + // Base that is dependent on whether or not the destructor can be trivial. + template + using VariantStateBaseDestructor = + absl::conditional_t>::value, VariantStateBase, VariantStateBaseDestructorNontrivial>; + + // Base that is dependent on whether or not the move-constructor can be + // implicitly generated by the compiler (trivial or deleted). + // Previously we were using `std::is_move_constructible>` to check + // whether all Ts have trivial move constructor, but it ran into a GCC bug: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84866 + // So we have to use a different approach (i.e. `HasTrivialMoveConstructor`) to + // work around the bug. + template + using VariantMoveBase = absl::conditional_t< + absl::disjunction< + absl::negation...>>, + absl::conjunction...>>::value, + VariantStateBaseDestructor, + VariantMoveBaseNontrivial>; + + // Base that is dependent on whether or not the copy-constructor can be trivial. + template + using VariantCopyBase = absl::conditional_t< + absl::disjunction< + absl::negation...>>, + std::is_copy_constructible>>::value, + VariantMoveBase, + VariantCopyBaseNontrivial>; + + // Base that is dependent on whether or not the move-assign can be trivial. 
+ template + using VariantMoveAssignBase = absl::conditional_t< + absl::disjunction< + absl::conjunction>, std::is_move_constructible>, std::is_destructible>>, + absl::negation..., + // Note: We're not qualifying this with + // absl:: because it doesn't compile + // under MSVC. + is_move_assignable...>>>::value, + VariantCopyBase, + VariantMoveAssignBaseNontrivial>; + + // Base that is dependent on whether or not the copy-assign can be trivial. + template + using VariantCopyAssignBase = absl::conditional_t< + absl::disjunction< + absl::conjunction>, std::is_copy_constructible>, std::is_destructible>>, + absl::negation..., + // Note: We're not qualifying this with + // absl:: because it doesn't compile + // under MSVC. + is_copy_assignable...>>>::value, + VariantMoveAssignBase, + VariantCopyAssignBaseNontrivial>; + + template + using VariantBase = VariantCopyAssignBase; + + template + class VariantStateBaseDestructorNontrivial : protected VariantStateBase + { + private: + using Base = VariantStateBase; + + protected: + using Base::Base; + + VariantStateBaseDestructorNontrivial() = default; + VariantStateBaseDestructorNontrivial(VariantStateBaseDestructorNontrivial&&) = + default; + VariantStateBaseDestructorNontrivial( + const VariantStateBaseDestructorNontrivial& + ) = default; + VariantStateBaseDestructorNontrivial& operator=( + VariantStateBaseDestructorNontrivial&& + ) = default; + VariantStateBaseDestructorNontrivial& operator=( + const VariantStateBaseDestructorNontrivial& + ) = default; + + struct Destroyer + { + template + void operator()(SizeT i) const + { + using Alternative = + typename absl::variant_alternative>::type; + variant_internal::AccessUnion(self->state_, i).~Alternative(); + } + + void operator()(SizeT /*i*/) const + { + // This space intentionally left blank + } + + VariantStateBaseDestructorNontrivial* self; + }; + + void destroy() + { + VisitIndices::Run(Destroyer{this}, index_); + } + + ~VariantStateBaseDestructorNontrivial() + { + 
destroy(); + } + + protected: + using Base::index_; + using Base::state_; + }; + + template + class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor + { + private: + using Base = VariantStateBaseDestructor; + + protected: + using Base::Base; + + struct Construct + { + template + void operator()(SizeT i) const + { + using Alternative = + typename absl::variant_alternative>::type; + ::new (static_cast(&self->state_)) Alternative( + variant_internal::AccessUnion(absl::move(other->state_), i) + ); + } + + void operator()(SizeT /*i*/) const + { + } + + VariantMoveBaseNontrivial* self; + VariantMoveBaseNontrivial* other; + }; + + VariantMoveBaseNontrivial() = default; + VariantMoveBaseNontrivial(VariantMoveBaseNontrivial&& other) noexcept( + absl::conjunction...>::value + ) : + Base(NoopConstructorTag()) + { + VisitIndices::Run(Construct{this, &other}, other.index_); + index_ = other.index_; + } + + VariantMoveBaseNontrivial(VariantMoveBaseNontrivial const&) = default; + + VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial&&) = default; + VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial const&) = + default; + + protected: + using Base::index_; + using Base::state_; + }; + + template + class VariantCopyBaseNontrivial : protected VariantMoveBase + { + private: + using Base = VariantMoveBase; + + protected: + using Base::Base; + + VariantCopyBaseNontrivial() = default; + VariantCopyBaseNontrivial(VariantCopyBaseNontrivial&&) = default; + + struct Construct + { + template + void operator()(SizeT i) const + { + using Alternative = + typename absl::variant_alternative>::type; + ::new (static_cast(&self->state_)) + Alternative(variant_internal::AccessUnion(other->state_, i)); + } + + void operator()(SizeT /*i*/) const + { + } + + VariantCopyBaseNontrivial* self; + const VariantCopyBaseNontrivial* other; + }; + + VariantCopyBaseNontrivial(VariantCopyBaseNontrivial const& other) : + Base(NoopConstructorTag()) + { + 
VisitIndices::Run(Construct{this, &other}, other.index_); + index_ = other.index_; + } + + VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial&&) = default; + VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial const&) = + default; + + protected: + using Base::index_; + using Base::state_; + }; + + template + class VariantMoveAssignBaseNontrivial : protected VariantCopyBase + { + friend struct VariantCoreAccess; + + private: + using Base = VariantCopyBase; + + protected: + using Base::Base; + + VariantMoveAssignBaseNontrivial() = default; + VariantMoveAssignBaseNontrivial(VariantMoveAssignBaseNontrivial&&) = default; + VariantMoveAssignBaseNontrivial(const VariantMoveAssignBaseNontrivial&) = + default; + VariantMoveAssignBaseNontrivial& operator=( + VariantMoveAssignBaseNontrivial const& + ) = default; + + VariantMoveAssignBaseNontrivial& + operator=(VariantMoveAssignBaseNontrivial&& other) noexcept( + absl::conjunction..., std::is_nothrow_move_assignable...>::value + ) + { + VisitIndices::Run( + VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_ + ); + return *this; + } + + protected: + using Base::index_; + using Base::state_; + }; + + template + class VariantCopyAssignBaseNontrivial : protected VariantMoveAssignBase + { + friend struct VariantCoreAccess; + + private: + using Base = VariantMoveAssignBase; + + protected: + using Base::Base; + + VariantCopyAssignBaseNontrivial() = default; + VariantCopyAssignBaseNontrivial(VariantCopyAssignBaseNontrivial&&) = default; + VariantCopyAssignBaseNontrivial(const VariantCopyAssignBaseNontrivial&) = + default; + VariantCopyAssignBaseNontrivial& operator=( + VariantCopyAssignBaseNontrivial&& + ) = default; + + VariantCopyAssignBaseNontrivial& operator=( + const VariantCopyAssignBaseNontrivial& other + ) + { + VisitIndices::Run( + VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_ + ); + return *this; + } + + protected: + using Base::index_; + using Base::state_; + }; 
+ + //////////////////////////////////////// + // Visitors for Comparison Operations // + //////////////////////////////////////// + + template + struct EqualsOp + { + const variant* v; + const variant* w; + + constexpr bool operator()(SizeT /*v_i*/) const + { + return true; + } + + template + constexpr bool operator()(SizeT /*v_i*/) const + { + return VariantCoreAccess::Access(*v) == VariantCoreAccess::Access(*w); + } + }; + + template + struct NotEqualsOp + { + const variant* v; + const variant* w; + + constexpr bool operator()(SizeT /*v_i*/) const + { + return false; + } + + template + constexpr bool operator()(SizeT /*v_i*/) const + { + return VariantCoreAccess::Access(*v) != VariantCoreAccess::Access(*w); + } + }; + + template + struct LessThanOp + { + const variant* v; + const variant* w; + + constexpr bool operator()(SizeT /*v_i*/) const + { + return false; + } + + template + constexpr bool operator()(SizeT /*v_i*/) const + { + return VariantCoreAccess::Access(*v) < VariantCoreAccess::Access(*w); + } + }; + + template + struct GreaterThanOp + { + const variant* v; + const variant* w; + + constexpr bool operator()(SizeT /*v_i*/) const + { + return false; + } + + template + constexpr bool operator()(SizeT /*v_i*/) const + { + return VariantCoreAccess::Access(*v) > VariantCoreAccess::Access(*w); + } + }; + + template + struct LessThanOrEqualsOp + { + const variant* v; + const variant* w; + + constexpr bool operator()(SizeT /*v_i*/) const + { + return true; + } + + template + constexpr bool operator()(SizeT /*v_i*/) const + { + return VariantCoreAccess::Access(*v) <= VariantCoreAccess::Access(*w); + } + }; + + template + struct GreaterThanOrEqualsOp + { + const variant* v; + const variant* w; + + constexpr bool operator()(SizeT /*v_i*/) const + { + return true; + } + + template + constexpr bool operator()(SizeT /*v_i*/) const + { + return VariantCoreAccess::Access(*v) >= VariantCoreAccess::Access(*w); + } + }; + + // Precondition: v.index() == w.index(); + 
template + struct SwapSameIndex + { + variant* v; + variant* w; + template + void operator()(SizeT) const + { + type_traits_internal::Swap(VariantCoreAccess::Access(*v), VariantCoreAccess::Access(*w)); + } + + void operator()(SizeT) const + { + } + }; + + // TODO(calabrese) do this from a different namespace for proper adl usage + template + struct Swap + { + variant* v; + variant* w; + + void generic_swap() const + { + variant tmp(std::move(*w)); + VariantCoreAccess::Destroy(*w); + VariantCoreAccess::InitFrom(*w, std::move(*v)); + VariantCoreAccess::Destroy(*v); + VariantCoreAccess::InitFrom(*v, std::move(tmp)); + } + + void operator()(SizeT /*w_i*/) const + { + if (!v->valueless_by_exception()) + { + generic_swap(); + } + } + + template + void operator()(SizeT /*w_i*/) + { + if (v->index() == Wi) + { + VisitIndices::Run(SwapSameIndex{v, w}, Wi); + } + else + { + generic_swap(); + } + } + }; + + template + struct VariantHashBase + { + VariantHashBase() = delete; + VariantHashBase(const VariantHashBase&) = delete; + VariantHashBase(VariantHashBase&&) = delete; + VariantHashBase& operator=(const VariantHashBase&) = delete; + VariantHashBase& operator=(VariantHashBase&&) = delete; + }; + + struct VariantHashVisitor + { + template + size_t operator()(const T& t) + { + return std::hash{}(t); + } + }; + + template + struct VariantHashBase...>::value>, Ts...> + { + using argument_type = Variant; + using result_type = size_t; + size_t operator()(const Variant& var) const + { + type_traits_internal::AssertHashEnabled(); + if (var.valueless_by_exception()) + { + return 239799884; + } + size_t result = VisitIndices::value>::Run( + PerformVisitation{ + std::forward_as_tuple(var), VariantHashVisitor{}}, + var.index() + ); + // Combine the index and the hash result in order to distinguish + // std::variant holding the same value as different alternative. 
+ return result ^ var.index(); + } + }; + + } // namespace variant_internal + ABSL_NAMESPACE_END +} // namespace absl + +#endif // !defined(ABSL_USES_STD_VARIANT) +#endif // ABSL_TYPES_INTERNAL_VARIANT_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/optional.h b/CAPI/cpp/grpc/include/absl/types/optional.h new file mode 100644 index 00000000..7f91bb8a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/optional.h @@ -0,0 +1,812 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// optional.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::optional` type for holding a value which +// may or may not be present. This type is useful for providing value semantics +// for operations that may either wish to return or hold "something-or-nothing". +// +// Example: +// +// // A common way to signal operation failure is to provide an output +// // parameter and a bool return type: +// bool AcquireResource(const Input&, Resource * out); +// +// // Providing an absl::optional return type provides a cleaner API: +// absl::optional AcquireResource(const Input&); +// +// `absl::optional` is a C++11 compatible version of the C++17 `std::optional` +// abstraction and is designed to be a drop-in replacement for code compliant +// with C++17. 
+#ifndef ABSL_TYPES_OPTIONAL_H_ +#define ABSL_TYPES_OPTIONAL_H_ + +#include "absl/base/config.h" // TODO(calabrese) IWYU removal? +#include "absl/utility/utility.h" + +#ifdef ABSL_USES_STD_OPTIONAL + +#include // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + using std::bad_optional_access; + using std::make_optional; + using std::nullopt; + using std::nullopt_t; + using std::optional; + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_USES_STD_OPTIONAL + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/internal/inline_variable.h" +#include "absl/meta/type_traits.h" +#include "absl/types/bad_optional_access.h" +#include "absl/types/internal/optional.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // nullopt_t + // + // Class type for `absl::nullopt` used to indicate an `absl::optional` type + // that does not contain a value. + struct nullopt_t + { + // It must not be default-constructible to avoid ambiguity for opt = {}. + explicit constexpr nullopt_t(optional_internal::init_t) noexcept + { + } + }; + + // nullopt + // + // A tag constant of type `absl::nullopt_t` used to indicate an empty + // `absl::optional` in certain functions, such as construction or assignment. + ABSL_INTERNAL_INLINE_CONSTEXPR(nullopt_t, nullopt, nullopt_t(optional_internal::init_t())); + + // ----------------------------------------------------------------------------- + // absl::optional + // ----------------------------------------------------------------------------- + // + // A value of type `absl::optional` holds either a value of `T` or an + // "empty" value. When it holds a value of `T`, it stores it as a direct + // sub-object, so `sizeof(optional)` is approximately + // `sizeof(T) + sizeof(bool)`. + // + // This implementation is based on the specification in the latest draft of the + // C++17 `std::optional` specification as of May 2017, section 20.6. 
+ // + // Differences between `absl::optional` and `std::optional` include: + // + // * `constexpr` is not used for non-const member functions. + // (dependency on some differences between C++11 and C++14.) + // * `absl::nullopt` and `absl::in_place` are not declared `constexpr`. We + // need the inline variable support in C++17 for external linkage. + // * Throws `absl::bad_optional_access` instead of + // `std::bad_optional_access`. + // * `make_optional()` cannot be declared `constexpr` due to the absence of + // guaranteed copy elision. + // * The move constructor's `noexcept` specification is stronger, i.e. if the + // default allocator is non-throwing (via setting + // `ABSL_ALLOCATOR_NOTHROW`), it evaluates to `noexcept(true)`, because + // we assume + // a) move constructors should only throw due to allocation failure and + // b) if T's move constructor allocates, it uses the same allocation + // function as the default allocator. + // + template + class optional : private optional_internal::optional_data, private optional_internal::optional_ctor_base::traits>, private optional_internal::optional_assign_base::traits> + { + using data_base = optional_internal::optional_data; + + public: + typedef T value_type; + + // Constructors + + // Constructs an `optional` holding an empty value, NOT a default constructed + // `T`. + constexpr optional() noexcept = default; + + // Constructs an `optional` initialized with `nullopt` to hold an empty value. + constexpr optional(nullopt_t) noexcept + { + } // NOLINT(runtime/explicit) + + // Copy constructor, standard semantics + optional(const optional&) = default; + + // Move constructor, standard semantics + optional(optional&&) = default; + + // Constructs a non-empty `optional` direct-initialized value of type `T` from + // the arguments `std::forward(args)...` within the `optional`. + // (The `in_place_t` is a tag used to indicate that the contained object + // should be constructed in-place.) 
+ template, std::is_constructible>::value>* = nullptr> + constexpr explicit optional(InPlaceT, Args&&... args) : + data_base(in_place_t(), absl::forward(args)...) + { + } + + // Constructs a non-empty `optional` direct-initialized value of type `T` from + // the arguments of an initializer_list and `std::forward(args)...`. + // (The `in_place_t` is a tag used to indicate that the contained object + // should be constructed in-place.) + template&, Args&&...>::value>::type> + constexpr explicit optional(in_place_t, std::initializer_list il, Args&&... args) : + data_base(in_place_t(), il, absl::forward(args)...) + { + } + + // Value constructor (implicit) + template< + typename U = T, + typename std::enable_if< + absl::conjunction::type>>, absl::negation, typename std::decay::type>>, std::is_convertible, std::is_constructible>::value, + bool>::type = false> + constexpr optional(U&& v) : + data_base(in_place_t(), absl::forward(v)) + { + } + + // Value constructor (explicit) + template< + typename U = T, + typename std::enable_if< + absl::conjunction::type>>, absl::negation, typename std::decay::type>>, absl::negation>, std::is_constructible>::value, + bool>::type = false> + explicit constexpr optional(U&& v) : + data_base(in_place_t(), absl::forward(v)) + { + } + + // Converting copy constructor (implicit) + template>, std::is_constructible, absl::negation>, std::is_convertible>::value, bool>::type = false> + optional(const optional& rhs) + { + if (rhs) + { + this->construct(*rhs); + } + } + + // Converting copy constructor (explicit) + template>, std::is_constructible, absl::negation>, absl::negation>>::value, bool>::type = false> + explicit optional(const optional& rhs) + { + if (rhs) + { + this->construct(*rhs); + } + } + + // Converting move constructor (implicit) + template>, std::is_constructible, absl::negation>, std::is_convertible>::value, bool>::type = false> + optional(optional&& rhs) + { + if (rhs) + { + this->construct(std::move(*rhs)); + } + } + + // 
Converting move constructor (explicit) + template< + typename U, + typename std::enable_if< + absl::conjunction< + absl::negation>, + std::is_constructible, + absl::negation< + optional_internal::is_constructible_convertible_from_optional< + T, + U>>, + absl::negation>>::value, + bool>::type = false> + explicit optional(optional&& rhs) + { + if (rhs) + { + this->construct(std::move(*rhs)); + } + } + + // Destructor. Trivial if `T` is trivially destructible. + ~optional() = default; + + // Assignment Operators + + // Assignment from `nullopt` + // + // Example: + // + // struct S { int value; }; + // optional opt = absl::nullopt; // Could also use opt = { }; + optional& operator=(nullopt_t) noexcept + { + this->destruct(); + return *this; + } + + // Copy assignment operator, standard semantics + optional& operator=(const optional& src) = default; + + // Move assignment operator, standard semantics + optional& operator=(optional&& src) = default; + + // Value assignment operators + template, typename std::decay::type>>, absl::negation, std::is_same::type>>>, std::is_constructible, std::is_assignable>::value>::type> + optional& operator=(U&& v) + { + this->assign(std::forward(v)); + return *this; + } + + template< + typename U, + int&..., // Workaround an internal compiler error in GCC 5 to 10. 
+ typename = typename std::enable_if>, + std::is_constructible, + std::is_assignable, + absl::negation< + optional_internal:: + is_constructible_convertible_assignable_from_optional< + T, + U>>>::value>::type> + optional& operator=(const optional& rhs) + { + if (rhs) + { + this->assign(*rhs); + } + else + { + this->destruct(); + } + return *this; + } + + template>, std::is_constructible, std::is_assignable, absl::negation>>::value>::type> + optional& operator=(optional&& rhs) + { + if (rhs) + { + this->assign(std::move(*rhs)); + } + else + { + this->destruct(); + } + return *this; + } + + // Modifiers + + // optional::reset() + // + // Destroys the inner `T` value of an `absl::optional` if one is present. + ABSL_ATTRIBUTE_REINITIALIZES void reset() noexcept + { + this->destruct(); + } + + // optional::emplace() + // + // (Re)constructs the underlying `T` in-place with the given forwarded + // arguments. + // + // Example: + // + // optional opt; + // opt.emplace(arg1,arg2,arg3); // Constructs Foo(arg1,arg2,arg3) + // + // If the optional is non-empty, and the `args` refer to subobjects of the + // current object, then behaviour is undefined, because the current object + // will be destructed before the new object is constructed with `args`. + template::value>::type> + T& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + this->destruct(); + this->construct(std::forward(args)...); + return reference(); + } + + // Emplace reconstruction overload for an initializer list and the given + // forwarded arguments. + // + // Example: + // + // struct Foo { + // Foo(std::initializer_list); + // }; + // + // optional opt; + // opt.emplace({1,2,3}); // Constructs Foo({1,2,3}) + template&, Args&&...>::value>::type> + T& emplace(std::initializer_list il, Args&&... 
args) ABSL_ATTRIBUTE_LIFETIME_BOUND + { + this->destruct(); + this->construct(il, std::forward(args)...); + return reference(); + } + + // Swaps + + // Swap, standard semantics + void swap(optional& rhs) noexcept( + std::is_nothrow_move_constructible::value&& + type_traits_internal::IsNothrowSwappable::value + ) + { + if (*this) + { + if (rhs) + { + type_traits_internal::Swap(**this, *rhs); + } + else + { + rhs.construct(std::move(**this)); + this->destruct(); + } + } + else + { + if (rhs) + { + this->construct(std::move(*rhs)); + rhs.destruct(); + } + else + { + // No effect (swap(disengaged, disengaged)). + } + } + } + + // Observers + + // optional::operator->() + // + // Accesses the underlying `T` value's member `m` of an `optional`. If the + // `optional` is empty, behavior is undefined. + // + // If you need myOpt->foo in constexpr, use (*myOpt).foo instead. + const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(this->engaged_); + return std::addressof(this->data_); + } + T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(this->engaged_); + return std::addressof(this->data_); + } + + // optional::operator*() + // + // Accesses the underlying `T` value of an `optional`. If the `optional` is + // empty, behavior is undefined. + constexpr const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return ABSL_HARDENING_ASSERT(this->engaged_), reference(); + } + T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(this->engaged_); + return reference(); + } + constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return ABSL_HARDENING_ASSERT(this->engaged_), absl::move(reference()); + } + T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND + { + ABSL_HARDENING_ASSERT(this->engaged_); + return std::move(reference()); + } + + // optional::operator bool() + // + // Returns false if and only if the `optional` is empty. 
+ // + // if (opt) { + // // do something with *opt or opt->; + // } else { + // // opt is empty. + // } + // + constexpr explicit operator bool() const noexcept + { + return this->engaged_; + } + + // optional::has_value() + // + // Determines whether the `optional` contains a value. Returns `false` if and + // only if `*this` is empty. + constexpr bool has_value() const noexcept + { + return this->engaged_; + } + +// Suppress bogus warning on MSVC: MSVC complains call to reference() after +// throw_bad_optional_access() is unreachable. +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4702) +#endif // _MSC_VER + // optional::value() + // + // Returns a reference to an `optional`s underlying value. The constness + // and lvalue/rvalue-ness of the `optional` is preserved to the view of + // the `T` sub-object. Throws `absl::bad_optional_access` when the `optional` + // is empty. + constexpr const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return static_cast(*this) ? reference() : (optional_internal::throw_bad_optional_access(), reference()); + } + T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND + { + return static_cast(*this) ? reference() : (optional_internal::throw_bad_optional_access(), reference()); + } + T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND + { // NOLINT(build/c++11) + return std::move( + static_cast(*this) ? reference() : (optional_internal::throw_bad_optional_access(), reference()) + ); + } + constexpr const T&& value() + const&& ABSL_ATTRIBUTE_LIFETIME_BOUND + { // NOLINT(build/c++11) + return absl::move( + static_cast(*this) ? reference() : (optional_internal::throw_bad_optional_access(), reference()) + ); + } +#ifdef _MSC_VER +#pragma warning(pop) +#endif // _MSC_VER + + // optional::value_or() + // + // Returns either the value of `T` or a passed default `v` if the `optional` + // is empty. 
+ template + constexpr T value_or(U&& v) const& + { + static_assert(std::is_copy_constructible::value, "optional::value_or: T must be copy constructible"); + static_assert(std::is_convertible::value, "optional::value_or: U must be convertible to T"); + return static_cast(*this) ? **this : static_cast(absl::forward(v)); + } + template + T value_or(U&& v) && + { // NOLINT(build/c++11) + static_assert(std::is_move_constructible::value, "optional::value_or: T must be move constructible"); + static_assert(std::is_convertible::value, "optional::value_or: U must be convertible to T"); + return static_cast(*this) ? std::move(**this) : static_cast(std::forward(v)); + } + + private: + // Private accessors for internal storage viewed as reference to T. + constexpr const T& reference() const + { + return this->data_; + } + T& reference() + { + return this->data_; + } + + // T constraint checks. You can't have an optional of nullopt_t, in_place_t + // or a reference. + static_assert( + !std::is_same::type>::value, + "optional is not allowed." + ); + static_assert( + !std::is_same::type>::value, + "optional is not allowed." + ); + static_assert(!std::is_reference::value, "optional is not allowed."); + }; + + // Non-member functions + + // swap() + // + // Performs a swap between two `absl::optional` objects, using standard + // semantics. + template::value && type_traits_internal::IsSwappable::value, bool>::type = false> + void swap(optional& a, optional& b) noexcept(noexcept(a.swap(b))) + { + a.swap(b); + } + + // make_optional() + // + // Creates a non-empty `optional` where the type of `T` is deduced. An + // `absl::optional` can also be explicitly instantiated with + // `make_optional(v)`. + // + // Note: `make_optional()` constructions may be declared `constexpr` for + // trivially copyable types `T`. Non-trivial types require copy elision + // support in C++17 for `make_optional` to support `constexpr` on such + // non-trivial types. 
+ // + // Example: + // + // constexpr absl::optional opt = absl::make_optional(1); + // static_assert(opt.value() == 1, ""); + template + constexpr optional::type> make_optional(T&& v) + { + return optional::type>(absl::forward(v)); + } + + template + constexpr optional make_optional(Args&&... args) + { + return optional(in_place_t(), absl::forward(args)...); + } + + template + constexpr optional make_optional(std::initializer_list il, Args&&... args) + { + return optional(in_place_t(), il, absl::forward(args)...); + } + + // Relational operators [optional.relops] + + // Empty optionals are considered equal to each other and less than non-empty + // optionals. Supports relations between optional and optional, between + // optional and U, and between optional and nullopt. + // + // Note: We're careful to support T having non-bool relationals. + + // Requires: The expression, e.g. "*x == *y" shall be well-formed and its result + // shall be convertible to bool. + // The C++17 (N4606) "Returns:" statements are translated into + // code in an obvious way here, and the original text retained as function docs. + // Returns: If bool(x) != bool(y), false; otherwise if bool(x) == false, true; + // otherwise *x == *y. + template + constexpr auto operator==(const optional& x, const optional& y) + -> decltype(optional_internal::convertible_to_bool(*x == *y)) + { + return static_cast(x) != static_cast(y) ? false : static_cast(x) == false ? true : + static_cast(*x == *y); + } + + // Returns: If bool(x) != bool(y), true; otherwise, if bool(x) == false, false; + // otherwise *x != *y. + template + constexpr auto operator!=(const optional& x, const optional& y) + -> decltype(optional_internal::convertible_to_bool(*x != *y)) + { + return static_cast(x) != static_cast(y) ? true : static_cast(x) == false ? false : + static_cast(*x != *y); + } + // Returns: If !y, false; otherwise, if !x, true; otherwise *x < *y. 
+ template + constexpr auto operator<(const optional& x, const optional& y) + -> decltype(optional_internal::convertible_to_bool(*x < *y)) + { + return !y ? false : !x ? true : + static_cast(*x < *y); + } + // Returns: If !x, false; otherwise, if !y, true; otherwise *x > *y. + template + constexpr auto operator>(const optional& x, const optional& y) + -> decltype(optional_internal::convertible_to_bool(*x > *y)) + { + return !x ? false : !y ? true : + static_cast(*x > *y); + } + // Returns: If !x, true; otherwise, if !y, false; otherwise *x <= *y. + template + constexpr auto operator<=(const optional& x, const optional& y) + -> decltype(optional_internal::convertible_to_bool(*x <= *y)) + { + return !x ? true : !y ? false : + static_cast(*x <= *y); + } + // Returns: If !y, true; otherwise, if !x, false; otherwise *x >= *y. + template + constexpr auto operator>=(const optional& x, const optional& y) + -> decltype(optional_internal::convertible_to_bool(*x >= *y)) + { + return !y ? true : !x ? false : + static_cast(*x >= *y); + } + + // Comparison with nullopt [optional.nullops] + // The C++17 (N4606) "Returns:" statements are used directly here. 
+ template + constexpr bool operator==(const optional& x, nullopt_t) noexcept + { + return !x; + } + template + constexpr bool operator==(nullopt_t, const optional& x) noexcept + { + return !x; + } + template + constexpr bool operator!=(const optional& x, nullopt_t) noexcept + { + return static_cast(x); + } + template + constexpr bool operator!=(nullopt_t, const optional& x) noexcept + { + return static_cast(x); + } + template + constexpr bool operator<(const optional&, nullopt_t) noexcept + { + return false; + } + template + constexpr bool operator<(nullopt_t, const optional& x) noexcept + { + return static_cast(x); + } + template + constexpr bool operator<=(const optional& x, nullopt_t) noexcept + { + return !x; + } + template + constexpr bool operator<=(nullopt_t, const optional&) noexcept + { + return true; + } + template + constexpr bool operator>(const optional& x, nullopt_t) noexcept + { + return static_cast(x); + } + template + constexpr bool operator>(nullopt_t, const optional&) noexcept + { + return false; + } + template + constexpr bool operator>=(const optional&, nullopt_t) noexcept + { + return true; + } + template + constexpr bool operator>=(nullopt_t, const optional& x) noexcept + { + return !x; + } + + // Comparison with T [optional.comp_with_t] + + // Requires: The expression, e.g. "*x == v" shall be well-formed and its result + // shall be convertible to bool. + // The C++17 (N4606) "Equivalent to:" statements are used directly here. + template + constexpr auto operator==(const optional& x, const U& v) + -> decltype(optional_internal::convertible_to_bool(*x == v)) + { + return static_cast(x) ? static_cast(*x == v) : false; + } + template + constexpr auto operator==(const U& v, const optional& x) + -> decltype(optional_internal::convertible_to_bool(v == *x)) + { + return static_cast(x) ? 
static_cast(v == *x) : false; + } + template + constexpr auto operator!=(const optional& x, const U& v) + -> decltype(optional_internal::convertible_to_bool(*x != v)) + { + return static_cast(x) ? static_cast(*x != v) : true; + } + template + constexpr auto operator!=(const U& v, const optional& x) + -> decltype(optional_internal::convertible_to_bool(v != *x)) + { + return static_cast(x) ? static_cast(v != *x) : true; + } + template + constexpr auto operator<(const optional& x, const U& v) + -> decltype(optional_internal::convertible_to_bool(*x < v)) + { + return static_cast(x) ? static_cast(*x < v) : true; + } + template + constexpr auto operator<(const U& v, const optional& x) + -> decltype(optional_internal::convertible_to_bool(v < *x)) + { + return static_cast(x) ? static_cast(v < *x) : false; + } + template + constexpr auto operator<=(const optional& x, const U& v) + -> decltype(optional_internal::convertible_to_bool(*x <= v)) + { + return static_cast(x) ? static_cast(*x <= v) : true; + } + template + constexpr auto operator<=(const U& v, const optional& x) + -> decltype(optional_internal::convertible_to_bool(v <= *x)) + { + return static_cast(x) ? static_cast(v <= *x) : false; + } + template + constexpr auto operator>(const optional& x, const U& v) + -> decltype(optional_internal::convertible_to_bool(*x > v)) + { + return static_cast(x) ? static_cast(*x > v) : false; + } + template + constexpr auto operator>(const U& v, const optional& x) + -> decltype(optional_internal::convertible_to_bool(v > *x)) + { + return static_cast(x) ? static_cast(v > *x) : true; + } + template + constexpr auto operator>=(const optional& x, const U& v) + -> decltype(optional_internal::convertible_to_bool(*x >= v)) + { + return static_cast(x) ? static_cast(*x >= v) : false; + } + template + constexpr auto operator>=(const U& v, const optional& x) + -> decltype(optional_internal::convertible_to_bool(v >= *x)) + { + return static_cast(x) ? 
static_cast(v >= *x) : true; + } + + ABSL_NAMESPACE_END +} // namespace absl + +namespace std +{ + + // std::hash specialization for absl::optional. + template + struct hash> : absl::optional_internal::optional_hash_base + { + }; + +} // namespace std + +#undef ABSL_MSVC_CONSTEXPR_BUG_IN_UNION_LIKE_CLASS + +#endif // ABSL_USES_STD_OPTIONAL + +#endif // ABSL_TYPES_OPTIONAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/span.h b/CAPI/cpp/grpc/include/absl/types/span.h new file mode 100644 index 00000000..9d0e61d9 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/span.h @@ -0,0 +1,859 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// span.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `Span` type for holding a reference to existing +// array data. The `Span` object, much like the `absl::string_view` object, +// does not own such data itself, and the data being referenced by the span must +// outlive the span itself. Unlike `view` type references, a span can hold a +// reference to mutable data (and can mutate it for underlying types of +// non-const T.) A span provides a lightweight way to pass a reference to such +// data. 
+// +// Additionally, this header file defines `MakeSpan()` and `MakeConstSpan()` +// factory functions, for clearly creating spans of type `Span` or read-only +// `Span` when such types may be difficult to identify due to issues +// with implicit conversion. +// +// The C++20 draft standard includes a `std::span` type. As of June 2020, the +// differences between `absl::Span` and `std::span` are: +// * `absl::Span` has `operator==` (which is likely a design bug, +// per https://abseil.io/blog/20180531-regular-types) +// * `absl::Span` has the factory functions `MakeSpan()` and +// `MakeConstSpan()` +// * bounds-checked access to `absl::Span` is accomplished with `at()` +// * `absl::Span` has compiler-provided move and copy constructors and +// assignment. This is due to them being specified as `constexpr`, but that +// implies const in C++11. +// * A read-only `absl::Span` can be implicitly constructed from an +// initializer list. +// * `absl::Span` has no `bytes()`, `size_bytes()`, `as_bytes()`, or +// `as_mutable_bytes()` methods +// * `absl::Span` has no static extent template parameter, nor constructors +// which exist only because of the static extent parameter. +// * `absl::Span` has an explicit mutable-reference constructor +// +// For more information, see the class comments below. 
+#ifndef ABSL_TYPES_SPAN_H_ +#define ABSL_TYPES_SPAN_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" // TODO(strel): remove this include +#include "absl/meta/type_traits.h" +#include "absl/types/internal/span.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + //------------------------------------------------------------------------------ + // Span + //------------------------------------------------------------------------------ + // + // A `Span` is an "array reference" type for holding a reference of contiguous + // array data; the `Span` object does not and cannot own such data itself. A + // span provides an easy way to provide overloads for anything operating on + // contiguous sequences without needing to manage pointers and array lengths + // manually. + + // A span is conceptually a pointer (ptr) and a length (size) into an already + // existing array of contiguous memory; the array it represents references the + // elements "ptr[0] .. ptr[size-1]". Passing a properly-constructed `Span` + // instead of raw pointers avoids many issues related to index out of bounds + // errors. + // + // Spans may also be constructed from containers holding contiguous sequences. + // Such containers must supply `data()` and `size() const` methods (e.g + // `std::vector`, `absl::InlinedVector`). All implicit conversions to + // `absl::Span` from such containers will create spans of type `const T`; + // spans which can mutate their values (of type `T`) must use explicit + // constructors. + // + // A `Span` is somewhat analogous to an `absl::string_view`, but for an array + // of elements of type `T`, and unlike an `absl::string_view`, a span can hold a + // reference to mutable data. 
A user of `Span` must ensure that the data being + // pointed to outlives the `Span` itself. + // + // You can construct a `Span` in several ways: + // + // * Explicitly from a reference to a container type + // * Explicitly from a pointer and size + // * Implicitly from a container type (but only for spans of type `const T`) + // * Using the `MakeSpan()` or `MakeConstSpan()` factory functions. + // + // Examples: + // + // // Construct a Span explicitly from a container: + // std::vector v = {1, 2, 3, 4, 5}; + // auto span = absl::Span(v); + // + // // Construct a Span explicitly from a C-style array: + // int a[5] = {1, 2, 3, 4, 5}; + // auto span = absl::Span(a); + // + // // Construct a Span implicitly from a container + // void MyRoutine(absl::Span a) { + // ... + // } + // std::vector v = {1,2,3,4,5}; + // MyRoutine(v) // convert to Span + // + // Note that `Span` objects, in addition to requiring that the memory they + // point to remains alive, must also ensure that such memory does not get + // reallocated. Therefore, to avoid undefined behavior, containers with + // associated spans should not invoke operations that may reallocate memory + // (such as resizing) or invalidate iterators into the container. + // + // One common use for a `Span` is when passing arguments to a routine that can + // accept a variety of array types (e.g. a `std::vector`, `absl::InlinedVector`, + // a C-style array, etc.). Instead of creating overloads for each case, you + // can simply specify a `Span` as the argument to such a routine. + // + // Example: + // + // void MyRoutine(absl::Span a) { + // ... 
+ // } + // + // std::vector v = {1,2,3,4,5}; + // MyRoutine(v); + // + // absl::InlinedVector my_inline_vector; + // MyRoutine(my_inline_vector); + // + // // Explicit constructor from pointer,size + // int* my_array = new int[10]; + // MyRoutine(absl::Span(my_array, 10)); + template + class Span + { + private: + // Used to determine whether a Span can be constructed from a container of + // type C. + template + using EnableIfConvertibleFrom = + typename std::enable_if::value && span_internal::HasSize::value>::type; + + // Used to SFINAE-enable a function when the slice elements are const. + template + using EnableIfValueIsConst = + typename std::enable_if::value, U>::type; + + // Used to SFINAE-enable a function when the slice elements are mutable. + template + using EnableIfValueIsMutable = + typename std::enable_if::value, U>::type; + + public: + using element_type = T; + using value_type = absl::remove_cv_t; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using size_type = size_t; + using difference_type = ptrdiff_t; + + static const size_type npos = ~(size_type(0)); + + constexpr Span() noexcept : + Span(nullptr, 0) + { + } + constexpr Span(pointer array, size_type length) noexcept + : + ptr_(array), + len_(length) + { + } + + // Implicit conversion constructors + template + constexpr Span(T (&a)[N]) noexcept // NOLINT(runtime/explicit) + : + Span(a, N) + { + } + + // Explicit reference constructor for a mutable `Span` type. Can be + // replaced with MakeSpan() to infer the type parameter. 
+ template, typename = EnableIfValueIsMutable, + typename = span_internal::EnableIfNotIsView> + explicit Span( + V& v + ABSL_ATTRIBUTE_LIFETIME_BOUND + ) noexcept // NOLINT(runtime/references) + : + Span(span_internal::GetData(v), v.size()) + { + } + + // Implicit reference constructor for a read-only `Span` type + template, typename = EnableIfValueIsConst, + typename = span_internal::EnableIfNotIsView> + constexpr Span( + const V& v + ABSL_ATTRIBUTE_LIFETIME_BOUND + ) noexcept // NOLINT(runtime/explicit) + : + Span(span_internal::GetData(v), v.size()) + { + } + + // Overloads of the above two functions that are only enabled for view types. + // This is so we can drop the ABSL_ATTRIBUTE_LIFETIME_BOUND annotation. These + // overloads must be made unique by using a different template parameter list + // (hence the = 0 for the IsView enabler). + template, typename = EnableIfValueIsMutable, + span_internal::EnableIfIsView = 0> + explicit Span(V& v) noexcept // NOLINT(runtime/references) + : + Span(span_internal::GetData(v), v.size()) + { + } + template, typename = EnableIfValueIsConst, + span_internal::EnableIfIsView = 0> + constexpr Span(const V& v) noexcept // NOLINT(runtime/explicit) + : + Span(span_internal::GetData(v), v.size()) + { + } + + // Implicit constructor from an initializer list, making it possible to pass a + // brace-enclosed initializer list to a function expecting a `Span`. Such + // spans constructed from an initializer list must be of type `Span`. + // + // void Process(absl::Span x); + // Process({1, 2, 3}); + // + // Note that as always the array referenced by the span must outlive the span. + // Since an initializer list constructor acts as if it is fed a temporary + // array (cf. C++ standard [dcl.init.list]/5), it's safe to use this + // constructor only when the `std::initializer_list` itself outlives the span. 
+ // In order to meet this requirement it's sufficient to ensure that neither + // the span nor a copy of it is used outside of the expression in which it's + // created: + // + // // Assume that this function uses the array directly, not retaining any + // // copy of the span or pointer to any of its elements. + // void Process(absl::Span ints); + // + // // Okay: the std::initializer_list will reference a temporary array + // // that isn't destroyed until after the call to Process returns. + // Process({ 17, 19 }); + // + // // Not okay: the storage used by the std::initializer_list is not + // // allowed to be referenced after the first line. + // absl::Span ints = { 17, 19 }; + // Process(ints); + // + // // Not okay for the same reason as above: even when the elements of the + // // initializer list expression are not temporaries the underlying array + // // is, so the initializer list must still outlive the span. + // const int foo = 17; + // absl::Span ints = { foo }; + // Process(ints); + // + template> + Span(std::initializer_list v + ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept // NOLINT(runtime/explicit) + : + Span(v.begin(), v.size()) + { + } + + // Accessors + + // Span::data() + // + // Returns a pointer to the span's underlying array of data (which is held + // outside the span). + constexpr pointer data() const noexcept + { + return ptr_; + } + + // Span::size() + // + // Returns the size of this span. + constexpr size_type size() const noexcept + { + return len_; + } + + // Span::length() + // + // Returns the length (size) of this span. + constexpr size_type length() const noexcept + { + return size(); + } + + // Span::empty() + // + // Returns a boolean indicating whether or not this span is considered empty. + constexpr bool empty() const noexcept + { + return size() == 0; + } + + // Span::operator[] + // + // Returns a reference to the i'th element of this span. 
+ constexpr reference operator[](size_type i) const noexcept + { + return ABSL_HARDENING_ASSERT(i < size()), ptr_[i]; + } + + // Span::at() + // + // Returns a reference to the i'th element of this span. + constexpr reference at(size_type i) const + { + return ABSL_PREDICT_TRUE(i < size()) // + ? + *(data() + i) : + (base_internal::ThrowStdOutOfRange( + "Span::at failed bounds check" + ), + *(data() + i)); + } + + // Span::front() + // + // Returns a reference to the first element of this span. The span must not + // be empty. + constexpr reference front() const noexcept + { + return ABSL_HARDENING_ASSERT(size() > 0), *data(); + } + + // Span::back() + // + // Returns a reference to the last element of this span. The span must not + // be empty. + constexpr reference back() const noexcept + { + return ABSL_HARDENING_ASSERT(size() > 0), *(data() + size() - 1); + } + + // Span::begin() + // + // Returns an iterator pointing to the first element of this span, or `end()` + // if the span is empty. + constexpr iterator begin() const noexcept + { + return data(); + } + + // Span::cbegin() + // + // Returns a const iterator pointing to the first element of this span, or + // `end()` if the span is empty. + constexpr const_iterator cbegin() const noexcept + { + return begin(); + } + + // Span::end() + // + // Returns an iterator pointing just beyond the last element at the + // end of this span. This iterator acts as a placeholder; attempting to + // access it results in undefined behavior. + constexpr iterator end() const noexcept + { + return data() + size(); + } + + // Span::cend() + // + // Returns a const iterator pointing just beyond the last element at the + // end of this span. This iterator acts as a placeholder; attempting to + // access it results in undefined behavior. 
+ constexpr const_iterator cend() const noexcept + { + return end(); + } + + // Span::rbegin() + // + // Returns a reverse iterator pointing to the last element at the end of this + // span, or `rend()` if the span is empty. + constexpr reverse_iterator rbegin() const noexcept + { + return reverse_iterator(end()); + } + + // Span::crbegin() + // + // Returns a const reverse iterator pointing to the last element at the end of + // this span, or `crend()` if the span is empty. + constexpr const_reverse_iterator crbegin() const noexcept + { + return rbegin(); + } + + // Span::rend() + // + // Returns a reverse iterator pointing just before the first element + // at the beginning of this span. This pointer acts as a placeholder; + // attempting to access its element results in undefined behavior. + constexpr reverse_iterator rend() const noexcept + { + return reverse_iterator(begin()); + } + + // Span::crend() + // + // Returns a reverse const iterator pointing just before the first element + // at the beginning of this span. This pointer acts as a placeholder; + // attempting to access its element results in undefined behavior. + constexpr const_reverse_iterator crend() const noexcept + { + return rend(); + } + + // Span mutations + + // Span::remove_prefix() + // + // Removes the first `n` elements from the span. + void remove_prefix(size_type n) noexcept + { + ABSL_HARDENING_ASSERT(size() >= n); + ptr_ += n; + len_ -= n; + } + + // Span::remove_suffix() + // + // Removes the last `n` elements from the span. + void remove_suffix(size_type n) noexcept + { + ABSL_HARDENING_ASSERT(size() >= n); + len_ -= n; + } + + // Span::subspan() + // + // Returns a `Span` starting at element `pos` and of length `len`. Both `pos` + // and `len` are of type `size_type` and thus non-negative. Parameter `pos` + // must be <= size(). Any `len` value that points past the end of the span + // will be trimmed to at most size() - `pos`. 
A default `len` value of `npos` + // ensures the returned subspan continues until the end of the span. + // + // Examples: + // + // std::vector vec = {10, 11, 12, 13}; + // absl::MakeSpan(vec).subspan(1, 2); // {11, 12} + // absl::MakeSpan(vec).subspan(2, 8); // {12, 13} + // absl::MakeSpan(vec).subspan(1); // {11, 12, 13} + // absl::MakeSpan(vec).subspan(4); // {} + // absl::MakeSpan(vec).subspan(5); // throws std::out_of_range + constexpr Span subspan(size_type pos = 0, size_type len = npos) const + { + return (pos <= size()) ? Span(data() + pos, (std::min)(size() - pos, len)) : (base_internal::ThrowStdOutOfRange("pos > size()"), Span()); + } + + // Span::first() + // + // Returns a `Span` containing first `len` elements. Parameter `len` is of + // type `size_type` and thus non-negative. `len` value must be <= size(). + // + // Examples: + // + // std::vector vec = {10, 11, 12, 13}; + // absl::MakeSpan(vec).first(1); // {10} + // absl::MakeSpan(vec).first(3); // {10, 11, 12} + // absl::MakeSpan(vec).first(5); // throws std::out_of_range + constexpr Span first(size_type len) const + { + return (len <= size()) ? Span(data(), len) : (base_internal::ThrowStdOutOfRange("len > size()"), Span()); + } + + // Span::last() + // + // Returns a `Span` containing last `len` elements. Parameter `len` is of + // type `size_type` and thus non-negative. `len` value must be <= size(). + // + // Examples: + // + // std::vector vec = {10, 11, 12, 13}; + // absl::MakeSpan(vec).last(1); // {13} + // absl::MakeSpan(vec).last(3); // {11, 12, 13} + // absl::MakeSpan(vec).last(5); // throws std::out_of_range + constexpr Span last(size_type len) const + { + return (len <= size()) ? Span(size() - len + data(), len) : (base_internal::ThrowStdOutOfRange("len > size()"), Span()); + } + + // Support for absl::Hash. 
+ template + friend H AbslHashValue(H h, Span v) + { + return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), v.size()); + } + + private: + pointer ptr_; + size_type len_; + }; + + template + const typename Span::size_type Span::npos; + + // Span relationals + + // Equality is compared element-by-element, while ordering is lexicographical. + // We provide three overloads for each operator to cover any combination on the + // left or right hand side of mutable Span, read-only Span, and + // convertible-to-read-only Span. + // TODO(zhangxy): Due to MSVC overload resolution bug with partial ordering + // template functions, 5 overloads per operator is needed as a workaround. We + // should update them to 3 overloads per operator using non-deduced context like + // string_view, i.e. + // - (Span, Span) + // - (Span, non_deduced>) + // - (non_deduced>, Span) + + // operator== + template + bool operator==(Span a, Span b) + { + return span_internal::EqualImpl(a, b); + } + template + bool operator==(Span a, Span b) + { + return span_internal::EqualImpl(a, b); + } + template + bool operator==(Span a, Span b) + { + return span_internal::EqualImpl(a, b); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator==(const U& a, Span b) + { + return span_internal::EqualImpl(a, b); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator==(Span a, const U& b) + { + return span_internal::EqualImpl(a, b); + } + + // operator!= + template + bool operator!=(Span a, Span b) + { + return !(a == b); + } + template + bool operator!=(Span a, Span b) + { + return !(a == b); + } + template + bool operator!=(Span a, Span b) + { + return !(a == b); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator!=(const U& a, Span b) + { + return !(a == b); + } + template< + typename T, + typename U, + typename = 
span_internal::EnableIfConvertibleTo>> + bool operator!=(Span a, const U& b) + { + return !(a == b); + } + + // operator< + template + bool operator<(Span a, Span b) + { + return span_internal::LessThanImpl(a, b); + } + template + bool operator<(Span a, Span b) + { + return span_internal::LessThanImpl(a, b); + } + template + bool operator<(Span a, Span b) + { + return span_internal::LessThanImpl(a, b); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator<(const U& a, Span b) + { + return span_internal::LessThanImpl(a, b); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator<(Span a, const U& b) + { + return span_internal::LessThanImpl(a, b); + } + + // operator> + template + bool operator>(Span a, Span b) + { + return b < a; + } + template + bool operator>(Span a, Span b) + { + return b < a; + } + template + bool operator>(Span a, Span b) + { + return b < a; + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator>(const U& a, Span b) + { + return b < a; + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator>(Span a, const U& b) + { + return b < a; + } + + // operator<= + template + bool operator<=(Span a, Span b) + { + return !(b < a); + } + template + bool operator<=(Span a, Span b) + { + return !(b < a); + } + template + bool operator<=(Span a, Span b) + { + return !(b < a); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator<=(const U& a, Span b) + { + return !(b < a); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator<=(Span a, const U& b) + { + return !(b < a); + } + + // operator>= + template + bool operator>=(Span a, Span b) + { + return !(a < b); + } + template + bool operator>=(Span a, Span b) + { + return !(a 
< b); + } + template + bool operator>=(Span a, Span b) + { + return !(a < b); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator>=(const U& a, Span b) + { + return !(a < b); + } + template< + typename T, + typename U, + typename = span_internal::EnableIfConvertibleTo>> + bool operator>=(Span a, const U& b) + { + return !(a < b); + } + + // MakeSpan() + // + // Constructs a mutable `Span`, deducing `T` automatically from either a + // container or pointer+size. + // + // Because a read-only `Span` is implicitly constructed from container + // types regardless of whether the container itself is a const container, + // constructing mutable spans of type `Span` from containers requires + // explicit constructors. The container-accepting version of `MakeSpan()` + // deduces the type of `T` by the constness of the pointer received from the + // container's `data()` member. Similarly, the pointer-accepting version returns + // a `Span` if `T` is `const`, and a `Span` otherwise. + // + // Examples: + // + // void MyRoutine(absl::Span a) { + // ... 
+ // }; + // // my_vector is a container of non-const types + // std::vector my_vector; + // + // // Constructing a Span implicitly attempts to create a Span of type + // // `Span` + // MyRoutine(my_vector); // error, type mismatch + // + // // Explicitly constructing the Span is verbose + // MyRoutine(absl::Span(my_vector)); + // + // // Use MakeSpan() to make an absl::Span + // MyRoutine(absl::MakeSpan(my_vector)); + // + // // Construct a span from an array ptr+size + // absl::Span my_span() { + // return absl::MakeSpan(&array[0], num_elements_); + // } + // + template + constexpr Span MakeSpan(T* ptr, size_t size) noexcept + { + return Span(ptr, size); + } + + template + Span MakeSpan(T* begin, T* end) noexcept + { + return ABSL_HARDENING_ASSERT(begin <= end), + Span(begin, static_cast(end - begin)); + } + + template + constexpr auto MakeSpan(C& c) noexcept // NOLINT(runtime/references) + -> decltype(absl::MakeSpan(span_internal::GetData(c), c.size())) + { + return MakeSpan(span_internal::GetData(c), c.size()); + } + + template + constexpr Span MakeSpan(T (&array)[N]) noexcept + { + return Span(array, N); + } + + // MakeConstSpan() + // + // Constructs a `Span` as with `MakeSpan`, deducing `T` automatically, + // but always returning a `Span`. + // + // Examples: + // + // void ProcessInts(absl::Span some_ints); + // + // // Call with a pointer and size. + // int array[3] = { 0, 0, 0 }; + // ProcessInts(absl::MakeConstSpan(&array[0], 3)); + // + // // Call with a [begin, end) pair. + // ProcessInts(absl::MakeConstSpan(&array[0], &array[3])); + // + // // Call directly with an array. + // ProcessInts(absl::MakeConstSpan(array)); + // + // // Call with a contiguous container. 
+ // std::vector some_ints = ...; + // ProcessInts(absl::MakeConstSpan(some_ints)); + // ProcessInts(absl::MakeConstSpan(std::vector{ 0, 0, 0 })); + // + template + constexpr Span MakeConstSpan(T* ptr, size_t size) noexcept + { + return Span(ptr, size); + } + + template + Span MakeConstSpan(T* begin, T* end) noexcept + { + return ABSL_HARDENING_ASSERT(begin <= end), Span(begin, end - begin); + } + + template + constexpr auto MakeConstSpan(const C& c) noexcept -> decltype(MakeSpan(c)) + { + return MakeSpan(c); + } + + template + constexpr Span MakeConstSpan(const T (&array)[N]) noexcept + { + return Span(array, N); + } + ABSL_NAMESPACE_END +} // namespace absl +#endif // ABSL_TYPES_SPAN_H_ diff --git a/CAPI/cpp/grpc/include/absl/types/variant.h b/CAPI/cpp/grpc/include/absl/types/variant.h new file mode 100644 index 00000000..d5c1f815 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/types/variant.h @@ -0,0 +1,925 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// variant.h +// ----------------------------------------------------------------------------- +// +// This header file defines an `absl::variant` type for holding a type-safe +// value of some prescribed set of types (noted as alternative types), and +// associated functions for managing variants. +// +// The `absl::variant` type is a form of type-safe union. 
An `absl::variant` +// should always hold a value of one of its alternative types (except in the +// "valueless by exception state" -- see below). A default-constructed +// `absl::variant` will hold the value of its first alternative type, provided +// it is default-constructible. +// +// In exceptional cases due to error, an `absl::variant` can hold no +// value (known as a "valueless by exception" state), though this is not the +// norm. +// +// As with `absl::optional`, an `absl::variant` -- when it holds a value -- +// allocates a value of that type directly within the `variant` itself; it +// cannot hold a reference, array, or the type `void`; it can, however, hold a +// pointer to externally managed memory. +// +// `absl::variant` is a C++11 compatible version of the C++17 `std::variant` +// abstraction and is designed to be a drop-in replacement for code compliant +// with C++17. + +#ifndef ABSL_TYPES_VARIANT_H_ +#define ABSL_TYPES_VARIANT_H_ + +#include "absl/base/config.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_USES_STD_VARIANT + +#include // IWYU pragma: export + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + using std::bad_variant_access; + using std::get; + using std::get_if; + using std::holds_alternative; + using std::monostate; + using std::variant; + using std::variant_alternative; + using std::variant_alternative_t; + using std::variant_npos; + using std::variant_size; + using std::variant_size_v; + using std::visit; + ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_USES_STD_VARIANT + +#include +#include +#include +#include + +#include "absl/base/macros.h" +#include "absl/base/port.h" +#include "absl/meta/type_traits.h" +#include "absl/types/internal/variant.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // ----------------------------------------------------------------------------- + // absl::variant + // ----------------------------------------------------------------------------- + // + // An `absl::variant` type is a form 
of type-safe union. An `absl::variant` -- + // except in exceptional cases -- always holds a value of one of its alternative + // types. + // + // Example: + // + // // Construct a variant that holds either an integer or a std::string and + // // assign it to a std::string. + // absl::variant v = std::string("abc"); + // + // // A default-constructed variant will hold a value-initialized value of + // // the first alternative type. + // auto a = absl::variant(); // Holds an int of value '0'. + // + // // variants are assignable. + // + // // copy assignment + // auto v1 = absl::variant("abc"); + // auto v2 = absl::variant(10); + // v2 = v1; // copy assign + // + // // move assignment + // auto v1 = absl::variant("abc"); + // v1 = absl::variant(10); + // + // // assignment through type conversion + // a = 128; // variant contains int + // a = "128"; // variant contains std::string + // + // An `absl::variant` holding a value of one of its alternative types `T` holds + // an allocation of `T` directly within the variant itself. An `absl::variant` + // is not allowed to allocate additional storage, such as dynamic memory, to + // allocate the contained value. The contained value shall be allocated in a + // region of the variant storage suitably aligned for all alternative types. + template + class variant; + + // swap() + // + // Swaps two `absl::variant` values. This function is equivalent to `v.swap(w)` + // where `v` and `w` are `absl::variant` types. + // + // Note that this function requires all alternative types to be both swappable + // and move-constructible, because any two variants may refer to either the same + // type (in which case, they will be swapped) or to two different types (in + // which case the values will need to be moved). + // + template< + typename... 
Ts, + absl::enable_if_t< + absl::conjunction..., type_traits_internal::IsSwappable...>::value, + int> = 0> + void swap(variant& v, variant& w) noexcept(noexcept(v.swap(w))) + { + v.swap(w); + } + + // variant_size + // + // Returns the number of alternative types available for a given `absl::variant` + // type as a compile-time constant expression. As this is a class template, it + // is not generally useful for accessing the number of alternative types of + // any given `absl::variant` instance. + // + // Example: + // + // auto a = absl::variant; + // constexpr int num_types = + // absl::variant_size>(); + // + // // You can also use the member constant `value`. + // constexpr int num_types = + // absl::variant_size>::value; + // + // // `absl::variant_size` is more valuable for use in generic code: + // template + // constexpr bool IsVariantMultivalue() { + // return absl::variant_size() > 1; + // } + // + // Note that the set of cv-qualified specializations of `variant_size` are + // provided to ensure that those specializations compile (especially when passed + // within template logic). + template + struct variant_size; + + template + struct variant_size> : std::integral_constant + { + }; + + // Specialization of `variant_size` for const qualified variants. + template + struct variant_size : variant_size::type + { + }; + + // Specialization of `variant_size` for volatile qualified variants. + template + struct variant_size : variant_size::type + { + }; + + // Specialization of `variant_size` for const volatile qualified variants. + template + struct variant_size : variant_size::type + { + }; + + // variant_alternative + // + // Returns the alternative type for a given `absl::variant` at the passed + // index value as a compile-time constant expression. As this is a class + // template resulting in a type, it is not useful for access of the run-time + // value of any given `absl::variant` variable. 
+ // + // Example: + // + // // The type of the 0th alternative is "int". + // using alternative_type_0 + // = absl::variant_alternative<0, absl::variant>::type; + // + // static_assert(std::is_same::value, ""); + // + // // `absl::variant_alternative` is more valuable for use in generic code: + // template + // constexpr bool IsFirstElementTrivial() { + // return std::is_trivial_v::type>; + // } + // + // Note that the set of cv-qualified specializations of `variant_alternative` + // are provided to ensure that those specializations compile (especially when + // passed within template logic). + template + struct variant_alternative; + + template + struct variant_alternative> + { + using type = + variant_internal::VariantAlternativeSfinaeT>; + }; + + // Specialization of `variant_alternative` for const qualified variants. + template + struct variant_alternative + { + using type = const typename variant_alternative::type; + }; + + // Specialization of `variant_alternative` for volatile qualified variants. + template + struct variant_alternative + { + using type = volatile typename variant_alternative::type; + }; + + // Specialization of `variant_alternative` for const volatile qualified + // variants. + template + struct variant_alternative + { + using type = const volatile typename variant_alternative::type; + }; + + // Template type alias for variant_alternative::type. + // + // Example: + // + // using alternative_type_0 + // = absl::variant_alternative_t<0, absl::variant>; + // static_assert(std::is_same::value, ""); + template + using variant_alternative_t = typename variant_alternative::type; + + // holds_alternative() + // + // Checks whether the given variant currently holds a given alternative type, + // returning `true` if so. 
+ // + // Example: + // + // absl::variant foo = 42; + // if (absl::holds_alternative(foo)) { + // std::cout << "The variant holds an integer"; + // } + template + constexpr bool holds_alternative(const variant& v) noexcept + { + static_assert( + variant_internal::UnambiguousIndexOfImpl, T, 0>::value != sizeof...(Types), + "The type T must occur exactly once in Types..." + ); + return v.index() == + variant_internal::UnambiguousIndexOf, T>::value; + } + + // get() + // + // Returns a reference to the value currently within a given variant, using + // either a unique alternative type amongst the variant's set of alternative + // types, or the variant's index value. Attempting to get a variant's value + // using a type that is not unique within the variant's set of alternative types + // is a compile-time error. If the index of the alternative being specified is + // different from the index of the alternative that is currently stored, throws + // `absl::bad_variant_access`. + // + // Example: + // + // auto a = absl::variant; + // + // // Get the value by type (if unique). + // int i = absl::get(a); + // + // auto b = absl::variant; + // + // // Getting the value by a type that is not unique is ill-formed. + // int j = absl::get(b); // Compile Error! + // + // // Getting value by index not ambiguous and allowed. + // int k = absl::get<1>(b); + + // Overload for getting a variant's lvalue by type. + template + constexpr T& get(variant& v) + { // NOLINT + return variant_internal::VariantCoreAccess::CheckedAccess< + variant_internal::IndexOf::value>(v); + } + + // Overload for getting a variant's rvalue by type. + // Note: `absl::move()` is required to allow use of constexpr in C++11. + template + constexpr T&& get(variant&& v) + { + return variant_internal::VariantCoreAccess::CheckedAccess< + variant_internal::IndexOf::value>(absl::move(v)); + } + + // Overload for getting a variant's const lvalue by type. 
+ template + constexpr const T& get(const variant& v) + { + return variant_internal::VariantCoreAccess::CheckedAccess< + variant_internal::IndexOf::value>(v); + } + + // Overload for getting a variant's const rvalue by type. + // Note: `absl::move()` is required to allow use of constexpr in C++11. + template + constexpr const T&& get(const variant&& v) + { + return variant_internal::VariantCoreAccess::CheckedAccess< + variant_internal::IndexOf::value>(absl::move(v)); + } + + // Overload for getting a variant's lvalue by index. + template + constexpr variant_alternative_t>& get( + variant& v + ) + { // NOLINT + return variant_internal::VariantCoreAccess::CheckedAccess(v); + } + + // Overload for getting a variant's rvalue by index. + // Note: `absl::move()` is required to allow use of constexpr in C++11. + template + constexpr variant_alternative_t>&& get( + variant&& v + ) + { + return variant_internal::VariantCoreAccess::CheckedAccess(absl::move(v)); + } + + // Overload for getting a variant's const lvalue by index. + template + constexpr const variant_alternative_t>& get( + const variant& v + ) + { + return variant_internal::VariantCoreAccess::CheckedAccess(v); + } + + // Overload for getting a variant's const rvalue by index. + // Note: `absl::move()` is required to allow use of constexpr in C++11. + template + constexpr const variant_alternative_t>&& get( + const variant&& v + ) + { + return variant_internal::VariantCoreAccess::CheckedAccess(absl::move(v)); + } + + // get_if() + // + // Returns a pointer to the value currently stored within a given variant, if + // present, using either a unique alternative type amongst the variant's set of + // alternative types, or the variant's index value. If such a value does not + // exist, returns `nullptr`. + // + // As with `get`, attempting to get a variant's value using a type that is not + // unique within the variant's set of alternative types is a compile-time error. 
+ + // Overload for getting a pointer to the value stored in the given variant by + // index. + template + constexpr absl::add_pointer_t>> + get_if(variant* v) noexcept + { + return (v != nullptr && v->index() == I) ? std::addressof( + variant_internal::VariantCoreAccess::Access(*v) + ) : + nullptr; + } + + // Overload for getting a pointer to the const value stored in the given + // variant by index. + template + constexpr absl::add_pointer_t>> + get_if(const variant* v) noexcept + { + return (v != nullptr && v->index() == I) ? std::addressof( + variant_internal::VariantCoreAccess::Access(*v) + ) : + nullptr; + } + + // Overload for getting a pointer to the value stored in the given variant by + // type. + template + constexpr absl::add_pointer_t get_if(variant* v) noexcept + { + return absl::get_if::value>(v); + } + + // Overload for getting a pointer to the const value stored in the given variant + // by type. + template + constexpr absl::add_pointer_t get_if( + const variant* v + ) noexcept + { + return absl::get_if::value>(v); + } + + // visit() + // + // Calls a provided functor on a given set of variants. `absl::visit()` is + // commonly used to conditionally inspect the state of a given variant (or set + // of variants). + // + // The functor must return the same type when called with any of the variants' + // alternatives. + // + // Example: + // + // // Define a visitor functor + // struct GetVariant { + // template + // void operator()(const T& i) const { + // std::cout << "The variant's value is: " << i; + // } + // }; + // + // // Declare our variant, and call `absl::visit()` on it. + // // Note that `GetVariant()` returns void in either case. + // absl::variant foo = std::string("foo"); + // GetVariant visitor; + // absl::visit(visitor, foo); // Prints `The variant's value is: foo' + template + variant_internal::VisitResult visit(Visitor&& vis, Variants&&... 
vars) + { + return variant_internal:: + VisitIndices>::value...>::Run( + variant_internal::PerformVisitation{ + std::forward_as_tuple(absl::forward(vars)...), + absl::forward(vis)}, + vars.index()... + ); + } + + // monostate + // + // The monostate class serves as a first alternative type for a variant for + // which the first variant type is otherwise not default-constructible. + struct monostate + { + }; + + // `absl::monostate` Relational Operators + + constexpr bool operator<(monostate, monostate) noexcept + { + return false; + } + constexpr bool operator>(monostate, monostate) noexcept + { + return false; + } + constexpr bool operator<=(monostate, monostate) noexcept + { + return true; + } + constexpr bool operator>=(monostate, monostate) noexcept + { + return true; + } + constexpr bool operator==(monostate, monostate) noexcept + { + return true; + } + constexpr bool operator!=(monostate, monostate) noexcept + { + return false; + } + + //------------------------------------------------------------------------------ + // `absl::variant` Template Definition + //------------------------------------------------------------------------------ + template + class variant : private variant_internal::VariantBase + { + static_assert(absl::conjunction, std::is_object...>::value, "Attempted to instantiate a variant containing a non-object " + "type."); + // Intentionally not qualifying `negation` with `absl::` to work around a bug + // in MSVC 2015 with inline namespace and variadic template. 
+ static_assert(absl::conjunction>, negation>...>::value, "Attempted to instantiate a variant containing an array type."); + static_assert(absl::conjunction, std::is_nothrow_destructible...>::value, "Attempted to instantiate a variant containing a non-nothrow " + "destructible type."); + + friend struct variant_internal::VariantCoreAccess; + + private: + using Base = variant_internal::VariantBase; + + public: + // Constructors + + // Constructs a variant holding a default-initialized value of the first + // alternative type. + constexpr variant() /*noexcept(see 111above)*/ = default; + + // Copy constructor, standard semantics + variant(const variant& other) = default; + + // Move constructor, standard semantics + variant(variant&& other) /*noexcept(see above)*/ = default; + + // Constructs a variant of an alternative type specified by overload + // resolution of the provided forwarding arguments through + // direct-initialization. + // + // Note: If the selected constructor is a constexpr constructor, this + // constructor shall be a constexpr constructor. + // + // NOTE: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0608r1.html + // has been voted passed the design phase in the C++ standard meeting in Mar + // 2018. It will be implemented and integrated into `absl::variant`. + template< + class T, + std::size_t I = std::enable_if< + variant_internal::IsNeitherSelfNorInPlace>::value, + variant_internal::IndexOfConstructedType>::type::value, + class Tj = absl::variant_alternative_t, + absl::enable_if_t::value>* = + nullptr> + constexpr variant(T&& t) noexcept(std::is_nothrow_constructible::value) : + Base(variant_internal::EmplaceTag(), absl::forward(t)) + { + } + + // Constructs a variant of an alternative type from the arguments through + // direct-initialization. + // + // Note: If the selected constructor is a constexpr constructor, this + // constructor shall be a constexpr constructor. 
+ template, Args...>::value>::type* = nullptr> + constexpr explicit variant(in_place_type_t, Args&&... args) : + Base(variant_internal::EmplaceTag::value>(), absl::forward(args)...) + { + } + + // Constructs a variant of an alternative type from an initializer list + // and other arguments through direct-initialization. + // + // Note: If the selected constructor is a constexpr constructor, this + // constructor shall be a constexpr constructor. + template, std::initializer_list&, Args...>::value>::type* = nullptr> + constexpr explicit variant(in_place_type_t, std::initializer_list il, Args&&... args) : + Base(variant_internal::EmplaceTag::value>(), il, absl::forward(args)...) + { + } + + // Constructs a variant of an alternative type from a provided index, + // through value-initialization using the provided forwarded arguments. + template, Args...>::value>::type* = nullptr> + constexpr explicit variant(in_place_index_t, Args&&... args) : + Base(variant_internal::EmplaceTag(), absl::forward(args)...) + { + } + + // Constructs a variant of an alternative type from a provided index, + // through value-initialization of an initializer list and the provided + // forwarded arguments. + template, std::initializer_list&, Args...>::value>::type* = nullptr> + constexpr explicit variant(in_place_index_t, std::initializer_list il, Args&&... args) : + Base(variant_internal::EmplaceTag(), il, absl::forward(args)...) + { + } + + // Destructors + + // Destroys the variant's currently contained value, provided that + // `absl::valueless_by_exception()` is false. 
+ ~variant() = default; + + // Assignment Operators + + // Copy assignment operator + variant& operator=(const variant& other) = default; + + // Move assignment operator + variant& operator=(variant&& other) /*noexcept(see above)*/ = default; + + // Converting assignment operator + // + // NOTE: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0608r1.html + // has been voted passed the design phase in the C++ standard meeting in Mar + // 2018. It will be implemented and integrated into `absl::variant`. + template< + class T, + std::size_t I = std::enable_if< + !std::is_same, variant>::value, + variant_internal::IndexOfConstructedType>::type::value, + class Tj = absl::variant_alternative_t, + typename std::enable_if::value && std::is_constructible::value>::type* = + nullptr> + variant& operator=(T&& t) noexcept( + std::is_nothrow_assignable::value&& + std::is_nothrow_constructible::value + ) + { + variant_internal::VisitIndices::Run( + variant_internal::VariantCoreAccess::MakeConversionAssignVisitor( + this, absl::forward(t) + ), + index() + ); + + return *this; + } + + // emplace() Functions + + // Constructs a value of the given alternative type T within the variant. The + // existing value of the variant is destroyed first (provided that + // `absl::valueless_by_exception()` is false). Requires that T is unambiguous + // in the variant. + // + // Example: + // + // absl::variant, int, std::string> v; + // v.emplace(99); + // v.emplace("abc"); + template< + class T, + class... Args, + typename std::enable_if::value, + variant>, + Args...>::value>::type* = nullptr> + T& emplace(Args&&... args) + { + return variant_internal::VariantCoreAccess::Replace< + variant_internal::UnambiguousIndexOf::value>( + this, absl::forward(args)... + ); + } + + // Constructs a value of the given alternative type T within the variant using + // an initializer list. 
The existing value of the variant is destroyed first + // (provided that `absl::valueless_by_exception()` is false). Requires that T + // is unambiguous in the variant. + // + // Example: + // + // absl::variant, int, std::string> v; + // v.emplace>({0, 1, 2}); + template< + class T, + class U, + class... Args, + typename std::enable_if::value, + variant>, + std::initializer_list&, + Args...>::value>::type* = nullptr> + T& emplace(std::initializer_list il, Args&&... args) + { + return variant_internal::VariantCoreAccess::Replace< + variant_internal::UnambiguousIndexOf::value>( + this, il, absl::forward(args)... + ); + } + + // Destroys the current value of the variant (provided that + // `absl::valueless_by_exception()` is false) and constructs a new value at + // the given index. + // + // Example: + // + // absl::variant, int, int> v; + // v.emplace<1>(99); + // v.emplace<2>(98); + // v.emplace(99); // Won't compile. 'int' isn't a unique type. + template, Args...>::value>::type* = nullptr> + absl::variant_alternative_t& emplace(Args&&... args) + { + return variant_internal::VariantCoreAccess::Replace( + this, absl::forward(args)... + ); + } + + // Destroys the current value of the variant (provided that + // `absl::valueless_by_exception()` is false) and constructs a new value at + // the given index using an initializer list and the provided arguments. + // + // Example: + // + // absl::variant, int, int> v; + // v.emplace<0>({0, 1, 2}); + template, std::initializer_list&, Args...>::value>::type* = nullptr> + absl::variant_alternative_t& emplace(std::initializer_list il, Args&&... args) + { + return variant_internal::VariantCoreAccess::Replace( + this, il, absl::forward(args)... + ); + } + + // variant::valueless_by_exception() + // + // Returns false if and only if the variant currently holds a valid value. 
+ constexpr bool valueless_by_exception() const noexcept + { + return this->index_ == absl::variant_npos; + } + + // variant::index() + // + // Returns the index value of the variant's currently selected alternative + // type. + constexpr std::size_t index() const noexcept + { + return this->index_; + } + + // variant::swap() + // + // Swaps the values of two variant objects. + // + void swap(variant& rhs) noexcept( + absl::conjunction< + std::is_nothrow_move_constructible, + std::is_nothrow_move_constructible..., + type_traits_internal::IsNothrowSwappable, + type_traits_internal::IsNothrowSwappable...>::value + ) + { + return variant_internal::VisitIndices::Run( + variant_internal::Swap{this, &rhs}, rhs.index() + ); + } + }; + + // We need a valid declaration of variant<> for SFINAE and overload resolution + // to work properly above, but we don't need a full declaration since this type + // will never be constructed. This declaration, though incomplete, suffices. + template<> + class variant<>; + + //------------------------------------------------------------------------------ + // Relational Operators + //------------------------------------------------------------------------------ + // + // If neither operand is in the `variant::valueless_by_exception` state: + // + // * If the index of both variants is the same, the relational operator + // returns the result of the corresponding relational operator for the + // corresponding alternative type. + // * If the index of both variants is not the same, the relational operator + // returns the result of that operation applied to the value of the left + // operand's index and the value of the right operand's index. + // * If at least one operand is in the valueless_by_exception state: + // - A variant in the valueless_by_exception state is only considered equal + // to another variant in the valueless_by_exception state. 
+ // - If exactly one operand is in the valueless_by_exception state, the + // variant in the valueless_by_exception state is less than the variant + // that is not in the valueless_by_exception state. + // + // Note: The value 1 is added to each index in the relational comparisons such + // that the index corresponding to the valueless_by_exception state wraps around + // to 0 (the lowest value for the index type), and the remaining indices stay in + // the same relative order. + + // Equal-to operator + template + constexpr variant_internal::RequireAllHaveEqualT operator==( + const variant& a, const variant& b + ) + { + return (a.index() == b.index()) && + variant_internal::VisitIndices::Run( + variant_internal::EqualsOp{&a, &b}, a.index() + ); + } + + // Not equal operator + template + constexpr variant_internal::RequireAllHaveNotEqualT operator!=( + const variant& a, const variant& b + ) + { + return (a.index() != b.index()) || + variant_internal::VisitIndices::Run( + variant_internal::NotEqualsOp{&a, &b}, a.index() + ); + } + + // Less-than operator + template + constexpr variant_internal::RequireAllHaveLessThanT operator<( + const variant& a, const variant& b + ) + { + return (a.index() != b.index()) ? (a.index() + 1) < (b.index() + 1) : variant_internal::VisitIndices::Run(variant_internal::LessThanOp{&a, &b}, a.index()); + } + + // Greater-than operator + template + constexpr variant_internal::RequireAllHaveGreaterThanT operator>( + const variant& a, const variant& b + ) + { + return (a.index() != b.index()) ? (a.index() + 1) > (b.index() + 1) : variant_internal::VisitIndices::Run(variant_internal::GreaterThanOp{&a, &b}, a.index()); + } + + // Less-than or equal-to operator + template + constexpr variant_internal::RequireAllHaveLessThanOrEqualT operator<=( + const variant& a, const variant& b + ) + { + return (a.index() != b.index()) ? 
(a.index() + 1) < (b.index() + 1) : variant_internal::VisitIndices::Run(variant_internal::LessThanOrEqualsOp{&a, &b}, a.index()); + } + + // Greater-than or equal-to operator + template + constexpr variant_internal::RequireAllHaveGreaterThanOrEqualT + operator>=(const variant& a, const variant& b) + { + return (a.index() != b.index()) ? (a.index() + 1) > (b.index() + 1) : variant_internal::VisitIndices::Run(variant_internal::GreaterThanOrEqualsOp{&a, &b}, a.index()); + } + + ABSL_NAMESPACE_END +} // namespace absl + +namespace std +{ + + // hash() + template<> // NOLINT + struct hash + { + std::size_t operator()(absl::monostate) const + { + return 0; + } + }; + + template // NOLINT + struct hash> : absl::variant_internal::VariantHashBase, void, absl::remove_const_t...> + { + }; + +} // namespace std + +#endif // ABSL_USES_STD_VARIANT + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace variant_internal + { + + // Helper visitor for converting a variant` into another type (mostly + // variant) that can be constructed from any type. + template + struct ConversionVisitor + { + template + To operator()(T&& v) const + { + return To(std::forward(v)); + } + }; + + } // namespace variant_internal + + // ConvertVariantTo() + // + // Helper functions to convert an `absl::variant` to a variant of another set of + // types, provided that the alternative type of the new variant type can be + // converted from any type in the source variant. 
+ // + // Example: + // + // absl::variant InternalReq(const Req&); + // + // // name1 and name2 are convertible to name + // absl::variant ExternalReq(const Req& req) { + // return absl::ConvertVariantTo>( + // InternalReq(req)); + // } + template + To ConvertVariantTo(Variant&& variant) + { + return absl::visit(variant_internal::ConversionVisitor{}, std::forward(variant)); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TYPES_VARIANT_H_ diff --git a/CAPI/cpp/grpc/include/absl/utility/internal/if_constexpr.h b/CAPI/cpp/grpc/include/absl/utility/internal/if_constexpr.h new file mode 100644 index 00000000..aa317f0e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/utility/internal/if_constexpr.h @@ -0,0 +1,75 @@ +// Copyright 2023 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The IfConstexpr and IfConstexprElse utilities in this file are meant to be +// used to emulate `if constexpr` in pre-C++17 mode in library implementation. +// The motivation is to allow for avoiding complex SFINAE. +// +// The functions passed in must depend on the type(s) of the object(s) that +// require SFINAE. 
For example: +// template +// int MaybeFoo(T& t) { +// if constexpr (HasFoo::value) return t.foo(); +// return 0; +// } +// +// can be written in pre-C++17 as: +// +// template +// int MaybeFoo(T& t) { +// int i = 0; +// absl::utility_internal::IfConstexpr::value>( +// [&](const auto& fooer) { i = fooer.foo(); }, t); +// return i; +// } + +#ifndef ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_ +#define ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_ + +#include +#include + +#include "absl/base/config.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace utility_internal + { + + template + auto IfConstexprElse(TrueFunc&& true_func, FalseFunc&& false_func, Args&&... args) + { + return std::get(std::forward_as_tuple( + std::forward(false_func), std::forward(true_func) + ))( + std::forward(args)... + ); + } + + template + void IfConstexpr(Func&& func, Args&&... args) + { + IfConstexprElse( + std::forward(func), [](auto&&...) {}, std::forward(args)... + ); + } + + } // namespace utility_internal + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_ diff --git a/CAPI/cpp/grpc/include/absl/utility/utility.h b/CAPI/cpp/grpc/include/absl/utility/utility.h new file mode 100644 index 00000000..3ca3f79c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/utility/utility.h @@ -0,0 +1,379 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// This header file contains C++11 versions of standard header +// abstractions available within C++14 and C++17, and are designed to be drop-in +// replacement for code compliant with C++14 and C++17. +// +// The following abstractions are defined: +// +// * integer_sequence == std::integer_sequence +// * index_sequence == std::index_sequence +// * make_integer_sequence == std::make_integer_sequence +// * make_index_sequence == std::make_index_sequence +// * index_sequence_for == std::index_sequence_for +// * apply == std::apply +// * exchange == std::exchange +// * make_from_tuple == std::make_from_tuple +// +// This header file also provides the tag types `in_place_t`, `in_place_type_t`, +// and `in_place_index_t`, as well as the constant `in_place`, and +// `constexpr` `std::move()` and `std::forward()` implementations in C++11. +// +// References: +// +// https://en.cppreference.com/w/cpp/utility/integer_sequence +// https://en.cppreference.com/w/cpp/utility/apply +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3658.html + +#ifndef ABSL_UTILITY_UTILITY_H_ +#define ABSL_UTILITY_UTILITY_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/invoke.h" +#include "absl/meta/type_traits.h" + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // integer_sequence + // + // Class template representing a compile-time integer sequence. An instantiation + // of `integer_sequence` has a sequence of integers encoded in its + // type through its template arguments (which is a common need when + // working with C++11 variadic templates). `absl::integer_sequence` is designed + // to be a drop-in replacement for C++14's `std::integer_sequence`. + // + // Example: + // + // template< class T, T... 
Ints > + // void user_function(integer_sequence); + // + // int main() + // { + // // user_function's `T` will be deduced to `int` and `Ints...` + // // will be deduced to `0, 1, 2, 3, 4`. + // user_function(make_integer_sequence()); + // } + template + struct integer_sequence + { + using value_type = T; + static constexpr size_t size() noexcept + { + return sizeof...(Ints); + } + }; + + // index_sequence + // + // A helper template for an `integer_sequence` of `size_t`, + // `absl::index_sequence` is designed to be a drop-in replacement for C++14's + // `std::index_sequence`. + template + using index_sequence = integer_sequence; + + namespace utility_internal + { + + template + struct Extend; + + // Note that SeqSize == sizeof...(Ints). It's passed explicitly for efficiency. + template + struct Extend, SeqSize, 0> + { + using type = integer_sequence; + }; + + template + struct Extend, SeqSize, 1> + { + using type = integer_sequence; + }; + + // Recursion helper for 'make_integer_sequence'. + // 'Gen::type' is an alias for 'integer_sequence'. + template + struct Gen + { + using type = + typename Extend::type, N / 2, N % 2>::type; + }; + + template + struct Gen + { + using type = integer_sequence; + }; + + template + struct InPlaceTypeTag + { + explicit InPlaceTypeTag() = delete; + InPlaceTypeTag(const InPlaceTypeTag&) = delete; + InPlaceTypeTag& operator=(const InPlaceTypeTag&) = delete; + }; + + template + struct InPlaceIndexTag + { + explicit InPlaceIndexTag() = delete; + InPlaceIndexTag(const InPlaceIndexTag&) = delete; + InPlaceIndexTag& operator=(const InPlaceIndexTag&) = delete; + }; + + } // namespace utility_internal + + // Compile-time sequences of integers + + // make_integer_sequence + // + // This template alias is equivalent to + // `integer_sequence`, and is designed to be a drop-in + // replacement for C++14's `std::make_integer_sequence`. 
+ template + using make_integer_sequence = typename utility_internal::Gen::type; + + // make_index_sequence + // + // This template alias is equivalent to `index_sequence<0, 1, ..., N-1>`, + // and is designed to be a drop-in replacement for C++14's + // `std::make_index_sequence`. + template + using make_index_sequence = make_integer_sequence; + + // index_sequence_for + // + // Converts a typename pack into an index sequence of the same length, and + // is designed to be a drop-in replacement for C++14's + // `std::index_sequence_for()` + template + using index_sequence_for = make_index_sequence; + + // Tag types + +#ifdef ABSL_USES_STD_OPTIONAL + + using std::in_place; + using std::in_place_t; + +#else // ABSL_USES_STD_OPTIONAL + + // in_place_t + // + // Tag type used to specify in-place construction, such as with + // `absl::optional`, designed to be a drop-in replacement for C++17's + // `std::in_place_t`. + struct in_place_t + { + }; + + ABSL_INTERNAL_INLINE_CONSTEXPR(in_place_t, in_place, {}); + +#endif // ABSL_USES_STD_OPTIONAL + +#if defined(ABSL_USES_STD_ANY) || defined(ABSL_USES_STD_VARIANT) + using std::in_place_type; + using std::in_place_type_t; +#else + + // in_place_type_t + // + // Tag type used for in-place construction when the type to construct needs to + // be specified, such as with `absl::any`, designed to be a drop-in replacement + // for C++17's `std::in_place_type_t`. + template + using in_place_type_t = void (*)(utility_internal::InPlaceTypeTag); + + template + void in_place_type(utility_internal::InPlaceTypeTag) + { + } +#endif // ABSL_USES_STD_ANY || ABSL_USES_STD_VARIANT + +#ifdef ABSL_USES_STD_VARIANT + using std::in_place_index; + using std::in_place_index_t; +#else + + // in_place_index_t + // + // Tag type used for in-place construction when the type to construct needs to + // be specified, such as with `absl::any`, designed to be a drop-in replacement + // for C++17's `std::in_place_index_t`. 
+ template + using in_place_index_t = void (*)(utility_internal::InPlaceIndexTag); + + template + void in_place_index(utility_internal::InPlaceIndexTag) + { + } +#endif // ABSL_USES_STD_VARIANT + + // Constexpr move and forward + + // move() + // + // A constexpr version of `std::move()`, designed to be a drop-in replacement + // for C++14's `std::move()`. + template + constexpr absl::remove_reference_t&& move(T&& t) noexcept + { + return static_cast&&>(t); + } + + // forward() + // + // A constexpr version of `std::forward()`, designed to be a drop-in replacement + // for C++14's `std::forward()`. + template + constexpr T&& forward( + absl::remove_reference_t& t + ) noexcept + { // NOLINT(runtime/references) + return static_cast(t); + } + + namespace utility_internal + { + // Helper method for expanding tuple into a called method. + template + auto apply_helper(Functor&& functor, Tuple&& t, index_sequence) + -> decltype(absl::base_internal::invoke( + absl::forward(functor), + std::get(absl::forward(t))... + )) + { + return absl::base_internal::invoke( + absl::forward(functor), + std::get(absl::forward(t))... + ); + } + + } // namespace utility_internal + + // apply + // + // Invokes a Callable using elements of a tuple as its arguments. + // Each element of the tuple corresponds to an argument of the call (in order). + // Both the Callable argument and the tuple argument are perfect-forwarded. + // For member-function Callables, the first tuple element acts as the `this` + // pointer. `absl::apply` is designed to be a drop-in replacement for C++17's + // `std::apply`. Unlike C++17's `std::apply`, this is not currently `constexpr`. + // + // Example: + // + // class Foo { + // public: + // void Bar(int); + // }; + // void user_function1(int, std::string); + // void user_function2(std::unique_ptr); + // auto user_lambda = [](int, int) {}; + // + // int main() + // { + // std::tuple tuple1(42, "bar"); + // // Invokes the first user function on int, std::string. 
+ // absl::apply(&user_function1, tuple1); + // + // std::tuple> tuple2(absl::make_unique()); + // // Invokes the user function that takes ownership of the unique + // // pointer. + // absl::apply(&user_function2, std::move(tuple2)); + // + // auto foo = absl::make_unique(); + // std::tuple tuple3(foo.get(), 42); + // // Invokes the method Bar on foo with one argument, 42. + // absl::apply(&Foo::Bar, tuple3); + // + // std::tuple tuple4(8, 9); + // // Invokes a lambda. + // absl::apply(user_lambda, tuple4); + // } + template + auto apply(Functor&& functor, Tuple&& t) + -> decltype(utility_internal::apply_helper( + absl::forward(functor), absl::forward(t), absl::make_index_sequence::type>::value>{} + )) + { + return utility_internal::apply_helper( + absl::forward(functor), absl::forward(t), absl::make_index_sequence::type>::value>{} + ); + } + + // exchange + // + // Replaces the value of `obj` with `new_value` and returns the old value of + // `obj`. `absl::exchange` is designed to be a drop-in replacement for C++14's + // `std::exchange`. + // + // Example: + // + // Foo& operator=(Foo&& other) { + // ptr1_ = absl::exchange(other.ptr1_, nullptr); + // int1_ = absl::exchange(other.int1_, -1); + // return *this; + // } + template + T exchange(T& obj, U&& new_value) + { + T old_value = absl::move(obj); + obj = absl::forward(new_value); + return old_value; + } + + namespace utility_internal + { + template + T make_from_tuple_impl(Tuple&& tup, absl::index_sequence) + { + return T(std::get(std::forward(tup))...); + } + } // namespace utility_internal + + // make_from_tuple + // + // Given the template parameter type `T` and a tuple of arguments + // `std::tuple(arg0, arg1, ..., argN)` constructs an object of type `T` as if by + // calling `T(arg0, arg1, ..., argN)`. 
+ // + // Example: + // + // std::tuple args("hello world", 5); + // auto s = absl::make_from_tuple(args); + // assert(s == "hello"); + // + template + constexpr T make_from_tuple(Tuple&& tup) + { + return utility_internal::make_from_tuple_impl( + std::forward(tup), + absl::make_index_sequence< + std::tuple_size>::value>{} + ); + } + + ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_UTILITY_UTILITY_H_ diff --git a/CAPI/cpp/grpc/include/ares.h b/CAPI/cpp/grpc/include/ares.h new file mode 100644 index 00000000..2d5583e5 --- /dev/null +++ b/CAPI/cpp/grpc/include/ares.h @@ -0,0 +1,753 @@ +/* MIT License + * + * Copyright (c) Massachusetts Institute of Technology + * Copyright (c) Daniel Stenberg + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * SPDX-License-Identifier: MIT + */ + +#ifndef ARES__H +#define ARES__H + +#include "ares_version.h" /* c-ares version defines */ +#include "ares_build.h" /* c-ares build definitions */ +#include "ares_rules.h" /* c-ares rules enforcement */ + +/* + * Define WIN32 when build target is Win32 API + */ + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) && \ + !defined(__SYMBIAN32__) +#define WIN32 +#endif + +#include + +/* HP-UX systems version 9, 10 and 11 lack sys/select.h and so does oldish + libc5-based Linux systems. Only include it on system that are known to + require it! */ +#if defined(_AIX) || defined(__NOVELL_LIBC__) || defined(__NetBSD__) || \ + defined(__minix) || defined(__SYMBIAN32__) || defined(__INTEGRITY) || \ + defined(ANDROID) || defined(__ANDROID__) || defined(__OpenBSD__) || \ + defined(__QNXNTO__) || defined(__MVS__) || defined(__HAIKU__) +#include +#endif +#if (defined(NETWARE) && !defined(__NOVELL_LIBC__)) +#include +#endif + +#if defined(WATT32) +#include +#include +#include +#elif defined(_WIN32_WCE) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include +#elif defined(WIN32) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include +#include +/* To aid with linking against a static c-ares build, lets tell the microsoft + * compiler to pull in needed dependencies */ +#ifdef _MSC_VER +#pragma comment(lib, "ws2_32") +#pragma comment(lib, "advapi32") +#pragma comment(lib, "iphlpapi") +#endif +#else +#include +#include +#endif + +#if defined(ANDROID) || defined(__ANDROID__) +#include +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + + /* + ** c-ares external API function linkage decorations. 
+ */ + +#if defined(_WIN32) || defined(__CYGWIN__) || defined(__SYMBIAN32__) +#if 1 +#define CARES_EXTERN +#else +#ifdef CARES_BUILDING_LIBRARY +#define CARES_EXTERN __declspec(dllexport) +#else +#define CARES_EXTERN __declspec(dllimport) +#endif +#endif +#else +#if defined(__GNUC__) && __GNUC__ >= 4 +#define CARES_EXTERN __attribute__((visibility("default"))) +#elif defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 900 +#define CARES_EXTERN __attribute__((visibility("default"))) +#elif defined(__SUNPRO_C) +#define CARES_EXTERN _global +#else +#define CARES_EXTERN +#endif +#endif + + typedef enum + { + ARES_SUCCESS = 0, + + /* Server error codes (ARES_ENODATA indicates no relevant answer) */ + ARES_ENODATA = 1, + ARES_EFORMERR = 2, + ARES_ESERVFAIL = 3, + ARES_ENOTFOUND = 4, + ARES_ENOTIMP = 5, + ARES_EREFUSED = 6, + + /* Locally generated error codes */ + ARES_EBADQUERY = 7, + ARES_EBADNAME = 8, + ARES_EBADFAMILY = 9, + ARES_EBADRESP = 10, + ARES_ECONNREFUSED = 11, + ARES_ETIMEOUT = 12, + ARES_EOF = 13, + ARES_EFILE = 14, + ARES_ENOMEM = 15, + ARES_EDESTRUCTION = 16, + ARES_EBADSTR = 17, + + /* ares_getnameinfo error codes */ + ARES_EBADFLAGS = 18, + + /* ares_getaddrinfo error codes */ + ARES_ENONAME = 19, + ARES_EBADHINTS = 20, + + /* Uninitialized library error code */ + ARES_ENOTINITIALIZED = 21, /* introduced in 1.7.0 */ + + /* ares_library_init error codes */ + ARES_ELOADIPHLPAPI = 22, /* introduced in 1.7.0 */ + ARES_EADDRGETNETWORKPARAMS = 23, /* introduced in 1.7.0 */ + + /* More error codes */ + ARES_ECANCELLED = 24, /* introduced in 1.7.0 */ + + /* More ares_getaddrinfo error codes */ + ARES_ESERVICE = 25, /* ares_getaddrinfo() was passed a text service name that + * is not recognized. introduced in 1.16.0 */ + + ARES_ENOSERVER = 26 /* No DNS servers were configured */ + } ares_status_t; + + typedef enum + { + ARES_FALSE = 0, + ARES_TRUE = 1 + } ares_bool_t; + + /*! Values for ARES_OPT_EVENT_THREAD */ + typedef enum + { + /*! 
Default (best choice) event system */ + ARES_EVSYS_DEFAULT = 0, + /*! Win32 IOCP/AFD_POLL event system */ + ARES_EVSYS_WIN32 = 1, + /*! Linux epoll */ + ARES_EVSYS_EPOLL = 2, + /*! BSD/MacOS kqueue */ + ARES_EVSYS_KQUEUE = 3, + /*! POSIX poll() */ + ARES_EVSYS_POLL = 4, + /*! last fallback on Unix-like systems, select() */ + ARES_EVSYS_SELECT = 5 + } ares_evsys_t; + +/* Flag values */ +#define ARES_FLAG_USEVC (1 << 0) +#define ARES_FLAG_PRIMARY (1 << 1) +#define ARES_FLAG_IGNTC (1 << 2) +#define ARES_FLAG_NORECURSE (1 << 3) +#define ARES_FLAG_STAYOPEN (1 << 4) +#define ARES_FLAG_NOSEARCH (1 << 5) +#define ARES_FLAG_NOALIASES (1 << 6) +#define ARES_FLAG_NOCHECKRESP (1 << 7) +#define ARES_FLAG_EDNS (1 << 8) +#define ARES_FLAG_NO_DFLT_SVR (1 << 9) + +/* Option mask values */ +#define ARES_OPT_FLAGS (1 << 0) +#define ARES_OPT_TIMEOUT (1 << 1) +#define ARES_OPT_TRIES (1 << 2) +#define ARES_OPT_NDOTS (1 << 3) +#define ARES_OPT_UDP_PORT (1 << 4) +#define ARES_OPT_TCP_PORT (1 << 5) +#define ARES_OPT_SERVERS (1 << 6) +#define ARES_OPT_DOMAINS (1 << 7) +#define ARES_OPT_LOOKUPS (1 << 8) +#define ARES_OPT_SOCK_STATE_CB (1 << 9) +#define ARES_OPT_SORTLIST (1 << 10) +#define ARES_OPT_SOCK_SNDBUF (1 << 11) +#define ARES_OPT_SOCK_RCVBUF (1 << 12) +#define ARES_OPT_TIMEOUTMS (1 << 13) +#define ARES_OPT_ROTATE (1 << 14) +#define ARES_OPT_EDNSPSZ (1 << 15) +#define ARES_OPT_NOROTATE (1 << 16) +#define ARES_OPT_RESOLVCONF (1 << 17) +#define ARES_OPT_HOSTS_FILE (1 << 18) +#define ARES_OPT_UDP_MAX_QUERIES (1 << 19) +#define ARES_OPT_MAXTIMEOUTMS (1 << 20) +#define ARES_OPT_QUERY_CACHE (1 << 21) +#define ARES_OPT_EVENT_THREAD (1 << 22) + +/* Nameinfo flag values */ +#define ARES_NI_NOFQDN (1 << 0) +#define ARES_NI_NUMERICHOST (1 << 1) +#define ARES_NI_NAMEREQD (1 << 2) +#define ARES_NI_NUMERICSERV (1 << 3) +#define ARES_NI_DGRAM (1 << 4) +#define ARES_NI_TCP 0 +#define ARES_NI_UDP ARES_NI_DGRAM +#define ARES_NI_SCTP (1 << 5) +#define ARES_NI_DCCP (1 << 6) +#define ARES_NI_NUMERICSCOPE 
(1 << 7) +#define ARES_NI_LOOKUPHOST (1 << 8) +#define ARES_NI_LOOKUPSERVICE (1 << 9) +/* Reserved for future use */ +#define ARES_NI_IDN (1 << 10) +#define ARES_NI_IDN_ALLOW_UNASSIGNED (1 << 11) +#define ARES_NI_IDN_USE_STD3_ASCII_RULES (1 << 12) + +/* Addrinfo flag values */ +#define ARES_AI_CANONNAME (1 << 0) +#define ARES_AI_NUMERICHOST (1 << 1) +#define ARES_AI_PASSIVE (1 << 2) +#define ARES_AI_NUMERICSERV (1 << 3) +#define ARES_AI_V4MAPPED (1 << 4) +#define ARES_AI_ALL (1 << 5) +#define ARES_AI_ADDRCONFIG (1 << 6) +#define ARES_AI_NOSORT (1 << 7) +#define ARES_AI_ENVHOSTS (1 << 8) +/* Reserved for future use */ +#define ARES_AI_IDN (1 << 10) +#define ARES_AI_IDN_ALLOW_UNASSIGNED (1 << 11) +#define ARES_AI_IDN_USE_STD3_ASCII_RULES (1 << 12) +#define ARES_AI_CANONIDN (1 << 13) + +#define ARES_AI_MASK \ + (ARES_AI_CANONNAME | ARES_AI_NUMERICHOST | ARES_AI_PASSIVE | \ + ARES_AI_NUMERICSERV | ARES_AI_V4MAPPED | ARES_AI_ALL | ARES_AI_ADDRCONFIG) +#define ARES_GETSOCK_MAXNUM \ + 16 /* ares_getsock() can return info about this \ + many sockets */ +#define ARES_GETSOCK_READABLE(bits, num) (bits & (1 << (num))) +#define ARES_GETSOCK_WRITABLE(bits, num) \ + (bits & (1 << ((num) + ARES_GETSOCK_MAXNUM))) + +/* c-ares library initialization flag values */ +#define ARES_LIB_INIT_NONE (0) +#define ARES_LIB_INIT_WIN32 (1 << 0) +#define ARES_LIB_INIT_ALL (ARES_LIB_INIT_WIN32) + + /* + * Typedef our socket type + */ + +#ifndef ares_socket_typedef +#ifdef WIN32 + typedef SOCKET ares_socket_t; +#define ARES_SOCKET_BAD INVALID_SOCKET +#else + typedef int ares_socket_t; +#define ARES_SOCKET_BAD -1 +#endif +#define ares_socket_typedef +#endif /* ares_socket_typedef */ + + typedef void (*ares_sock_state_cb)(void* data, ares_socket_t socket_fd, int readable, int writable); + + struct apattern; + + /* NOTE about the ares_options struct to users and developers. + + This struct will remain looking like this. 
It will not be extended nor + shrunk in future releases, but all new options will be set by ares_set_*() + options instead of with the ares_init_options() function. + + Eventually (in a galaxy far far away), all options will be settable by + ares_set_*() options and the ares_init_options() function will become + deprecated. + + When new options are added to c-ares, they are not added to this + struct. And they are not "saved" with the ares_save_options() function but + instead we encourage the use of the ares_dup() function. Needless to say, + if you add config options to c-ares you need to make sure ares_dup() + duplicates this new option. + + */ + struct ares_options + { + int flags; + int timeout; /* in seconds or milliseconds, depending on options */ + int tries; + int ndots; + unsigned short udp_port; /* host byte order */ + unsigned short tcp_port; /* host byte order */ + int socket_send_buffer_size; + int socket_receive_buffer_size; + struct in_addr* servers; + int nservers; + char** domains; + int ndomains; + char* lookups; + ares_sock_state_cb sock_state_cb; + void* sock_state_cb_data; + struct apattern* sortlist; + int nsort; + int ednspsz; + char* resolvconf_path; + char* hosts_path; + int udp_max_queries; + int maxtimeout; /* in milliseconds */ + unsigned int qcache_max_ttl; /* Maximum TTL for query cache, 0=disabled */ + ares_evsys_t evsys; + }; + + struct hostent; + struct timeval; + struct sockaddr; + struct ares_channeldata; + struct ares_addrinfo; + struct ares_addrinfo_hints; + + /* Legacy typedef, don't use, you can't specify "const" */ + typedef struct ares_channeldata* ares_channel; + + /* Current main channel typedef */ + typedef struct ares_channeldata ares_channel_t; + + typedef void (*ares_callback)(void* arg, int status, int timeouts, unsigned char* abuf, int alen); + + typedef void (*ares_host_callback)(void* arg, int status, int timeouts, struct hostent* hostent); + + typedef void (*ares_nameinfo_callback)(void* arg, int status, int 
timeouts, char* node, char* service); + + typedef int (*ares_sock_create_callback)(ares_socket_t socket_fd, int type, void* data); + + typedef int (*ares_sock_config_callback)(ares_socket_t socket_fd, int type, void* data); + + typedef void (*ares_addrinfo_callback)(void* arg, int status, int timeouts, struct ares_addrinfo* res); + + CARES_EXTERN int ares_library_init(int flags); + + CARES_EXTERN int ares_library_init_mem(int flags, void* (*amalloc)(size_t size), void (*afree)(void* ptr), void* (*arealloc)(void* ptr, size_t size)); + +#if defined(ANDROID) || defined(__ANDROID__) + CARES_EXTERN void ares_library_init_jvm(JavaVM* jvm); + CARES_EXTERN int ares_library_init_android(jobject connectivity_manager); + CARES_EXTERN int ares_library_android_initialized(void); +#endif + + CARES_EXTERN int ares_library_initialized(void); + + CARES_EXTERN void ares_library_cleanup(void); + + CARES_EXTERN const char* ares_version(int* version); + + CARES_EXTERN int ares_init(ares_channel_t** channelptr); + + CARES_EXTERN int ares_init_options(ares_channel_t** channelptr, const struct ares_options* options, int optmask); + + CARES_EXTERN int ares_save_options(ares_channel_t* channel, struct ares_options* options, int* optmask); + + CARES_EXTERN void ares_destroy_options(struct ares_options* options); + + CARES_EXTERN int ares_dup(ares_channel_t** dest, ares_channel_t* src); + + CARES_EXTERN ares_status_t ares_reinit(ares_channel_t* channel); + + CARES_EXTERN void ares_destroy(ares_channel_t* channel); + + CARES_EXTERN void ares_cancel(ares_channel_t* channel); + + /* These next 3 configure local binding for the out-going socket + * connection. Use these to specify source IP and/or network device + * on multi-homed systems. 
+ */ + CARES_EXTERN void ares_set_local_ip4(ares_channel_t* channel, unsigned int local_ip); + + /* local_ip6 should be 16 bytes in length */ + CARES_EXTERN void ares_set_local_ip6(ares_channel_t* channel, const unsigned char* local_ip6); + + /* local_dev_name should be null terminated. */ + CARES_EXTERN void ares_set_local_dev(ares_channel_t* channel, const char* local_dev_name); + + CARES_EXTERN void ares_set_socket_callback(ares_channel_t* channel, ares_sock_create_callback callback, void* user_data); + + CARES_EXTERN void ares_set_socket_configure_callback( + ares_channel_t* channel, ares_sock_config_callback callback, void* user_data + ); + + CARES_EXTERN int ares_set_sortlist(ares_channel_t* channel, const char* sortstr); + + CARES_EXTERN void ares_getaddrinfo(ares_channel_t* channel, const char* node, const char* service, const struct ares_addrinfo_hints* hints, ares_addrinfo_callback callback, void* arg); + + CARES_EXTERN void ares_freeaddrinfo(struct ares_addrinfo* ai); + + /* + * Virtual function set to have user-managed socket IO. + * Note that all functions need to be defined, and when + * set, the library will not do any bind nor set any + * socket options, assuming the client handles these + * through either socket creation or the + * ares_sock_config_callback call. 
+ */ + struct iovec; + + struct ares_socket_functions + { + ares_socket_t (*asocket)(int, int, int, void*); + int (*aclose)(ares_socket_t, void*); + int (*aconnect)(ares_socket_t, const struct sockaddr*, ares_socklen_t, void*); + ares_ssize_t (*arecvfrom)(ares_socket_t, void*, size_t, int, struct sockaddr*, ares_socklen_t*, void*); + ares_ssize_t (*asendv)(ares_socket_t, const struct iovec*, int, void*); + }; + + CARES_EXTERN void + ares_set_socket_functions(ares_channel_t* channel, const struct ares_socket_functions* funcs, void* user_data); + + CARES_EXTERN void ares_send(ares_channel_t* channel, const unsigned char* qbuf, int qlen, ares_callback callback, void* arg); + + CARES_EXTERN void ares_query(ares_channel_t* channel, const char* name, int dnsclass, int type, ares_callback callback, void* arg); + + CARES_EXTERN void ares_search(ares_channel_t* channel, const char* name, int dnsclass, int type, ares_callback callback, void* arg); + + CARES_EXTERN void ares_gethostbyname(ares_channel_t* channel, const char* name, int family, ares_host_callback callback, void* arg); + + CARES_EXTERN int ares_gethostbyname_file(ares_channel_t* channel, const char* name, int family, struct hostent** host); + + CARES_EXTERN void ares_gethostbyaddr(ares_channel_t* channel, const void* addr, int addrlen, int family, ares_host_callback callback, void* arg); + + CARES_EXTERN void ares_getnameinfo(ares_channel_t* channel, const struct sockaddr* sa, ares_socklen_t salen, int flags, ares_nameinfo_callback callback, void* arg); + + CARES_EXTERN int ares_fds(ares_channel_t* channel, fd_set* read_fds, fd_set* write_fds); + + CARES_EXTERN int ares_getsock(ares_channel_t* channel, ares_socket_t* socks, int numsocks); + + CARES_EXTERN struct timeval* ares_timeout(ares_channel_t* channel, struct timeval* maxtv, struct timeval* tv); + + CARES_EXTERN void ares_process(ares_channel_t* channel, fd_set* read_fds, fd_set* write_fds); + + CARES_EXTERN void ares_process_fd(ares_channel_t* channel, 
ares_socket_t read_fd, ares_socket_t write_fd); + + CARES_EXTERN int ares_create_query(const char* name, int dnsclass, int type, unsigned short id, int rd, unsigned char** buf, int* buflen, int max_udp_size); + + CARES_EXTERN int ares_mkquery(const char* name, int dnsclass, int type, unsigned short id, int rd, unsigned char** buf, int* buflen); + + CARES_EXTERN int ares_expand_name(const unsigned char* encoded, const unsigned char* abuf, int alen, char** s, long* enclen); + + CARES_EXTERN int ares_expand_string(const unsigned char* encoded, const unsigned char* abuf, int alen, unsigned char** s, long* enclen); + + /* + * NOTE: before c-ares 1.7.0 we would most often use the system in6_addr + * struct below when ares itself was built, but many apps would use this + * private version since the header checked a HAVE_* define for it. Starting + * with 1.7.0 we always declare and use our own to stop relying on the + * system's one. + */ + struct ares_in6_addr + { + union + { + unsigned char _S6_u8[16]; + } _S6_un; + }; + + struct ares_addr + { + int family; + + union + { + struct in_addr addr4; + struct ares_in6_addr addr6; + } addr; + }; + + struct ares_addrttl + { + struct in_addr ipaddr; + int ttl; + }; + + struct ares_addr6ttl + { + struct ares_in6_addr ip6addr; + int ttl; + }; + + struct ares_caa_reply + { + struct ares_caa_reply* next; + int critical; + unsigned char* property; + size_t plength; /* plength excludes null termination */ + unsigned char* value; + size_t length; /* length excludes null termination */ + }; + + struct ares_srv_reply + { + struct ares_srv_reply* next; + char* host; + unsigned short priority; + unsigned short weight; + unsigned short port; + }; + + struct ares_mx_reply + { + struct ares_mx_reply* next; + char* host; + unsigned short priority; + }; + + struct ares_txt_reply + { + struct ares_txt_reply* next; + unsigned char* txt; + size_t length; /* length excludes null termination */ + }; + + /* NOTE: This structure is a superset of 
ares_txt_reply + */ + struct ares_txt_ext + { + struct ares_txt_ext* next; + unsigned char* txt; + size_t length; + /* 1 - if start of new record + * 0 - if a chunk in the same record */ + unsigned char record_start; + }; + + struct ares_naptr_reply + { + struct ares_naptr_reply* next; + unsigned char* flags; + unsigned char* service; + unsigned char* regexp; + char* replacement; + unsigned short order; + unsigned short preference; + }; + + struct ares_soa_reply + { + char* nsname; + char* hostmaster; + unsigned int serial; + unsigned int refresh; + unsigned int retry; + unsigned int expire; + unsigned int minttl; + }; + + struct ares_uri_reply + { + struct ares_uri_reply* next; + unsigned short priority; + unsigned short weight; + char* uri; + int ttl; + }; + + /* + * Similar to addrinfo, but with extra ttl and missing canonname. + */ + struct ares_addrinfo_node + { + int ai_ttl; + int ai_flags; + int ai_family; + int ai_socktype; + int ai_protocol; + ares_socklen_t ai_addrlen; + struct sockaddr* ai_addr; + struct ares_addrinfo_node* ai_next; + }; + + /* + * alias - label of the resource record. + * name - value (canonical name) of the resource record. + * See RFC2181 10.1.1. CNAME terminology. + */ + struct ares_addrinfo_cname + { + int ttl; + char* alias; + char* name; + struct ares_addrinfo_cname* next; + }; + + struct ares_addrinfo + { + struct ares_addrinfo_cname* cnames; + struct ares_addrinfo_node* nodes; + char* name; + }; + + struct ares_addrinfo_hints + { + int ai_flags; + int ai_family; + int ai_socktype; + int ai_protocol; + }; + + /* + ** Parse the buffer, starting at *abuf and of length alen bytes, previously + ** obtained from an ares_search call. Put the results in *host, if nonnull. + ** Also, if addrttls is nonnull, put up to *naddrttls IPv4 addresses along with + ** their TTLs in that array, and set *naddrttls to the number of addresses + ** so written. 
+ */ + + CARES_EXTERN int ares_parse_a_reply(const unsigned char* abuf, int alen, struct hostent** host, struct ares_addrttl* addrttls, int* naddrttls); + + CARES_EXTERN int ares_parse_aaaa_reply(const unsigned char* abuf, int alen, struct hostent** host, struct ares_addr6ttl* addrttls, int* naddrttls); + + CARES_EXTERN int ares_parse_caa_reply(const unsigned char* abuf, int alen, struct ares_caa_reply** caa_out); + + CARES_EXTERN int ares_parse_ptr_reply(const unsigned char* abuf, int alen, const void* addr, int addrlen, int family, struct hostent** host); + + CARES_EXTERN int ares_parse_ns_reply(const unsigned char* abuf, int alen, struct hostent** host); + + CARES_EXTERN int ares_parse_srv_reply(const unsigned char* abuf, int alen, struct ares_srv_reply** srv_out); + + CARES_EXTERN int ares_parse_mx_reply(const unsigned char* abuf, int alen, struct ares_mx_reply** mx_out); + + CARES_EXTERN int ares_parse_txt_reply(const unsigned char* abuf, int alen, struct ares_txt_reply** txt_out); + + CARES_EXTERN int ares_parse_txt_reply_ext(const unsigned char* abuf, int alen, struct ares_txt_ext** txt_out); + + CARES_EXTERN int ares_parse_naptr_reply(const unsigned char* abuf, int alen, struct ares_naptr_reply** naptr_out); + + CARES_EXTERN int ares_parse_soa_reply(const unsigned char* abuf, int alen, struct ares_soa_reply** soa_out); + + CARES_EXTERN int ares_parse_uri_reply(const unsigned char* abuf, int alen, struct ares_uri_reply** uri_out); + + CARES_EXTERN void ares_free_string(void* str); + + CARES_EXTERN void ares_free_hostent(struct hostent* host); + + CARES_EXTERN void ares_free_data(void* dataptr); + + CARES_EXTERN const char* ares_strerror(int code); + + struct ares_addr_node + { + struct ares_addr_node* next; + int family; + + union + { + struct in_addr addr4; + struct ares_in6_addr addr6; + } addr; + }; + + struct ares_addr_port_node + { + struct ares_addr_port_node* next; + int family; + + union + { + struct in_addr addr4; + struct ares_in6_addr addr6; + } 
addr; + + int udp_port; + int tcp_port; + }; + + CARES_EXTERN int ares_set_servers(ares_channel_t* channel, const struct ares_addr_node* servers); + CARES_EXTERN int + ares_set_servers_ports(ares_channel_t* channel, const struct ares_addr_port_node* servers); + + /* Incoming string format: host[:port][,host[:port]]... */ + CARES_EXTERN int ares_set_servers_csv(ares_channel_t* channel, const char* servers); + CARES_EXTERN int ares_set_servers_ports_csv(ares_channel_t* channel, const char* servers); + CARES_EXTERN char* ares_get_servers_csv(ares_channel_t* channel); + + CARES_EXTERN int ares_get_servers(ares_channel_t* channel, struct ares_addr_node** servers); + CARES_EXTERN int ares_get_servers_ports(ares_channel_t* channel, struct ares_addr_port_node** servers); + + CARES_EXTERN const char* ares_inet_ntop(int af, const void* src, char* dst, ares_socklen_t size); + + CARES_EXTERN int ares_inet_pton(int af, const char* src, void* dst); + + /*! Whether or not the c-ares library was built with threadsafety + * + * \return ARES_TRUE if built with threadsafety, ARES_FALSE if not + */ + CARES_EXTERN ares_bool_t ares_threadsafety(void); + + /*! Block until notified that there are no longer any queries in queue, or + * the specified timeout has expired. + * + * \param[in] channel Initialized ares channel + * \param[in] timeout_ms Number of milliseconds to wait for the queue to be + * empty. -1 for Infinite. + * \return ARES_ENOTIMP if not built with threading support, ARES_ETIMEOUT + * if requested timeout expires, ARES_SUCCESS when queue is empty. + */ + CARES_EXTERN ares_status_t ares_queue_wait_empty(ares_channel_t* channel, int timeout_ms); + + /*! Retrieve the total number of active queries pending answers from servers. + * Some c-ares requests may spawn multiple queries, such as ares_getaddrinfo() + * when using AF_UNSPEC, which will be reflected in this number. 
+ * + * \param[in] channel Initialized ares channel + * \return Number of active queries to servers + */ + CARES_EXTERN size_t ares_queue_active_queries(ares_channel_t* channel); + +#ifdef __cplusplus +} +#endif + +/* DNS record parser, writer, and helpers */ +#include "ares_dns_record.h" + +#endif /* ARES__H */ diff --git a/CAPI/cpp/grpc/include/ares_build.h b/CAPI/cpp/grpc/include/ares_build.h new file mode 100644 index 00000000..02414091 --- /dev/null +++ b/CAPI/cpp/grpc/include/ares_build.h @@ -0,0 +1,46 @@ +#ifndef __CARES_BUILD_H +#define __CARES_BUILD_H +/* + * Copyright (C) The c-ares project and its contributors + * SPDX-License-Identifier: MIT + */ + +#define CARES_TYPEOF_ARES_SOCKLEN_T socklen_t +#define CARES_TYPEOF_ARES_SSIZE_T __int64 + +/* Prefix names with CARES_ to make sure they don't conflict with other config.h + * files. We need to include some dependent headers that may be system specific + * for C-Ares */ +#define CARES_HAVE_SYS_TYPES_H +/* #undef CARES_HAVE_SYS_SOCKET_H */ +#define CARES_HAVE_WINDOWS_H +#define CARES_HAVE_WS2TCPIP_H +#define CARES_HAVE_WINSOCK2_H +#define CARES_HAVE_WINDOWS_H +/* #undef CARES_HAVE_ARPA_NAMESER_H */ +/* #undef CARES_HAVE_ARPA_NAMESER_COMPAT_H */ + +#ifdef CARES_HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif + +#ifdef CARES_HAVE_SYS_SOCKET_H +#include <sys/socket.h> +#endif + +#ifdef CARES_HAVE_WINSOCK2_H +#include <winsock2.h> +#endif + +#ifdef CARES_HAVE_WS2TCPIP_H +#include <ws2tcpip.h> +#endif + +#ifdef CARES_HAVE_WINDOWS_H +#include <windows.h> +#endif + +typedef CARES_TYPEOF_ARES_SOCKLEN_T ares_socklen_t; +typedef CARES_TYPEOF_ARES_SSIZE_T ares_ssize_t; + +#endif /* __CARES_BUILD_H */ diff --git a/CAPI/cpp/grpc/include/ares_dns.h b/CAPI/cpp/grpc/include/ares_dns.h new file mode 100644 index 00000000..50261b42 --- /dev/null +++ b/CAPI/cpp/grpc/include/ares_dns.h @@ -0,0 +1,121 @@ +/* MIT License + * + * Copyright (c) Massachusetts Institute of Technology + * Copyright (c) The c-ares project and its contributors + * + * Permission is hereby granted, free of charge, to 
any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * SPDX-License-Identifier: MIT + */ +#ifndef HEADER_CARES_DNS_H +#define HEADER_CARES_DNS_H + +/* + * NOTE TO INTEGRATORS: + * + * This header is made public due to legacy projects relying on it. + * Please do not use the macros within this header, or include this + * header in your project as it may be removed in the future. + */ + +/* + * Macro DNS__16BIT reads a network short (16 bit) given in network + * byte order, and returns its value as an unsigned short. + */ +#define DNS__16BIT(p) \ + ((unsigned short)((unsigned int)0xffff & (((unsigned int)((unsigned char)(p)[0]) << 8U) | ((unsigned int)((unsigned char)(p)[1]))))) + +/* + * Macro DNS__32BIT reads a network long (32 bit) given in network + * byte order, and returns its value as an unsigned int. 
+ */ +#define DNS__32BIT(p) \ + ((unsigned int)(((unsigned int)((unsigned char)(p)[0]) << 24U) | ((unsigned int)((unsigned char)(p)[1]) << 16U) | ((unsigned int)((unsigned char)(p)[2]) << 8U) | ((unsigned int)((unsigned char)(p)[3])))) + +#define DNS__SET16BIT(p, v) \ + (((p)[0] = (unsigned char)(((v) >> 8) & 0xff)), \ + ((p)[1] = (unsigned char)((v)&0xff))) +#define DNS__SET32BIT(p, v) \ + (((p)[0] = (unsigned char)(((v) >> 24) & 0xff)), \ + ((p)[1] = (unsigned char)(((v) >> 16) & 0xff)), \ + ((p)[2] = (unsigned char)(((v) >> 8) & 0xff)), \ + ((p)[3] = (unsigned char)((v)&0xff))) + +#if 0 +/* we cannot use this approach on systems where we can't access 16/32 bit + data on un-aligned addresses */ +#define DNS__16BIT(p) ntohs(*(unsigned short*)(p)) +#define DNS__32BIT(p) ntohl(*(unsigned long*)(p)) +#define DNS__SET16BIT(p, v) *(unsigned short*)(p) = htons(v) +#define DNS__SET32BIT(p, v) *(unsigned long*)(p) = htonl(v) +#endif + +/* Macros for parsing a DNS header */ +#define DNS_HEADER_QID(h) DNS__16BIT(h) +#define DNS_HEADER_QR(h) (((h)[2] >> 7) & 0x1) +#define DNS_HEADER_OPCODE(h) (((h)[2] >> 3) & 0xf) +#define DNS_HEADER_AA(h) (((h)[2] >> 2) & 0x1) +#define DNS_HEADER_TC(h) (((h)[2] >> 1) & 0x1) +#define DNS_HEADER_RD(h) ((h)[2] & 0x1) +#define DNS_HEADER_RA(h) (((h)[3] >> 7) & 0x1) +#define DNS_HEADER_Z(h) (((h)[3] >> 4) & 0x7) +#define DNS_HEADER_RCODE(h) ((h)[3] & 0xf) +#define DNS_HEADER_QDCOUNT(h) DNS__16BIT((h) + 4) +#define DNS_HEADER_ANCOUNT(h) DNS__16BIT((h) + 6) +#define DNS_HEADER_NSCOUNT(h) DNS__16BIT((h) + 8) +#define DNS_HEADER_ARCOUNT(h) DNS__16BIT((h) + 10) + +/* Macros for constructing a DNS header */ +#define DNS_HEADER_SET_QID(h, v) DNS__SET16BIT(h, v) +#define DNS_HEADER_SET_QR(h, v) ((h)[2] |= (unsigned char)(((v)&0x1) << 7)) +#define DNS_HEADER_SET_OPCODE(h, v) \ + ((h)[2] |= (unsigned char)(((v)&0xf) << 3)) +#define DNS_HEADER_SET_AA(h, v) ((h)[2] |= (unsigned char)(((v)&0x1) << 2)) +#define DNS_HEADER_SET_TC(h, v) ((h)[2] |= (unsigned 
char)(((v)&0x1) << 1)) +#define DNS_HEADER_SET_RD(h, v) ((h)[2] |= (unsigned char)((v)&0x1)) +#define DNS_HEADER_SET_RA(h, v) ((h)[3] |= (unsigned char)(((v)&0x1) << 7)) +#define DNS_HEADER_SET_Z(h, v) ((h)[3] |= (unsigned char)(((v)&0x7) << 4)) +#define DNS_HEADER_SET_RCODE(h, v) ((h)[3] |= (unsigned char)((v)&0xf)) +#define DNS_HEADER_SET_QDCOUNT(h, v) DNS__SET16BIT((h) + 4, v) +#define DNS_HEADER_SET_ANCOUNT(h, v) DNS__SET16BIT((h) + 6, v) +#define DNS_HEADER_SET_NSCOUNT(h, v) DNS__SET16BIT((h) + 8, v) +#define DNS_HEADER_SET_ARCOUNT(h, v) DNS__SET16BIT((h) + 10, v) + +/* Macros for parsing the fixed part of a DNS question */ +#define DNS_QUESTION_TYPE(q) DNS__16BIT(q) +#define DNS_QUESTION_CLASS(q) DNS__16BIT((q) + 2) + +/* Macros for constructing the fixed part of a DNS question */ +#define DNS_QUESTION_SET_TYPE(q, v) DNS__SET16BIT(q, v) +#define DNS_QUESTION_SET_CLASS(q, v) DNS__SET16BIT((q) + 2, v) + +/* Macros for parsing the fixed part of a DNS resource record */ +#define DNS_RR_TYPE(r) DNS__16BIT(r) +#define DNS_RR_CLASS(r) DNS__16BIT((r) + 2) +#define DNS_RR_TTL(r) DNS__32BIT((r) + 4) +#define DNS_RR_LEN(r) DNS__16BIT((r) + 8) + +/* Macros for constructing the fixed part of a DNS resource record */ +#define DNS_RR_SET_TYPE(r, v) DNS__SET16BIT(r, v) +#define DNS_RR_SET_CLASS(r, v) DNS__SET16BIT((r) + 2, v) +#define DNS_RR_SET_TTL(r, v) DNS__SET32BIT((r) + 4, v) +#define DNS_RR_SET_LEN(r, v) DNS__SET16BIT((r) + 8, v) + +#endif /* HEADER_CARES_DNS_H */ diff --git a/CAPI/cpp/grpc/include/ares_dns_record.h b/CAPI/cpp/grpc/include/ares_dns_record.h new file mode 100644 index 00000000..8a9b3015 --- /dev/null +++ b/CAPI/cpp/grpc/include/ares_dns_record.h @@ -0,0 +1,930 @@ +/* MIT License + * + * Copyright (c) 2023 Brad House + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the 
rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * SPDX-License-Identifier: MIT + */ +#ifndef __ARES_DNS_RECORD_H +#define __ARES_DNS_RECORD_H + +/* Include ares.h, not this file directly */ + +#ifdef __cplusplus +extern "C" +{ +#endif + + /*! \addtogroup ares_dns_record DNS Record Handling + * + * This is a set of functions to create and manipulate DNS records. + * + * @{ + */ + + /*! DNS Record types handled by c-ares. Some record types may only be valid + * on requests (e.g. ARES_REC_TYPE_ANY), and some may only be valid on + * responses */ + typedef enum + { + ARES_REC_TYPE_A = 1, /*!< Host address. */ + ARES_REC_TYPE_NS = 2, /*!< Authoritative server. */ + ARES_REC_TYPE_CNAME = 5, /*!< Canonical name. */ + ARES_REC_TYPE_SOA = 6, /*!< Start of authority zone. */ + ARES_REC_TYPE_PTR = 12, /*!< Domain name pointer. */ + ARES_REC_TYPE_HINFO = 13, /*!< Host information. */ + ARES_REC_TYPE_MX = 15, /*!< Mail routing information. */ + ARES_REC_TYPE_TXT = 16, /*!< Text strings. */ + ARES_REC_TYPE_AAAA = 28, /*!< RFC 3596. Ip6 Address. */ + ARES_REC_TYPE_SRV = 33, /*!< RFC 2782. Server Selection. */ + ARES_REC_TYPE_NAPTR = 35, /*!< RFC 3403. 
Naming Authority Pointer */ + ARES_REC_TYPE_OPT = 41, /*!< RFC 6891. EDNS0 option (meta-RR) */ + + ARES_REC_TYPE_TLSA = 52, /*!< RFC 6698. DNS-Based Authentication of Named + * Entities (DANE) Transport Layer Security + * (TLS) Protocol: TLSA */ + ARES_REC_TYPE_SVCB = 64, /*!< RFC 9460. General Purpose Service Binding */ + ARES_REC_TYPE_HTTPS = 65, /*!< RFC 9460. Service Binding type for use with + * HTTPS */ + ARES_REC_TYPE_ANY = 255, /*!< Wildcard match. Not response RR. */ + ARES_REC_TYPE_URI = 256, /*!< RFC 7553. Uniform Resource Identifier */ + ARES_REC_TYPE_CAA = 257, /*!< RFC 6844. Certification Authority + * Authorization. */ + ARES_REC_TYPE_RAW_RR = 65536 /*!< Used as an indicator that the RR record + * is not parsed, but provided in wire + * format */ + } ares_dns_rec_type_t; + + /*! DNS Classes for requests and responses. */ + typedef enum + { + ARES_CLASS_IN = 1, /*!< Internet */ + ARES_CLASS_CHAOS = 3, /*!< CHAOS */ + ARES_CLASS_HESOID = 4, /*!< Hesoid [Dyer 87] */ + ARES_CLASS_NONE = 254, /*!< RFC 2136 */ + ARES_CLASS_ANY = 255 /*!< Any class (requests only) */ + } ares_dns_class_t; + + /*! DNS RR Section type */ + typedef enum + { + ARES_SECTION_ANSWER = 1, /*!< Answer section */ + ARES_SECTION_AUTHORITY = 2, /*!< Authority section */ + ARES_SECTION_ADDITIONAL = 3 /*!< Additional information section */ + } ares_dns_section_t; + + /*! DNS Header opcodes */ + typedef enum + { + ARES_OPCODE_QUERY = 0, /*!< Standard query */ + ARES_OPCODE_IQUERY = 1, /*!< Inverse query. Obsolete. */ + ARES_OPCODE_STATUS = 2, /*!< Name server status query */ + ARES_OPCODE_NOTIFY = 4, /*!< Zone change notification (RFC 1996) */ + ARES_OPCODE_UPDATE = 5, /*!< Zone update message (RFC2136) */ + } ares_dns_opcode_t; + + /*! DNS Header flags */ + typedef enum + { + ARES_FLAG_QR = 1 << 0, /*!< QR. If set, is a response */ + ARES_FLAG_AA = 1 << 1, /*!< Authoritative Answer. If set, is authoritative */ + ARES_FLAG_TC = 1 << 2, /*!< Truncation. 
If set, is truncated response */ + ARES_FLAG_RD = 1 << 3, /*!< Recursion Desired. If set, recursion is desired */ + ARES_FLAG_RA = 1 << 4, /*!< Recursion Available. If set, server supports + * recursion */ + ARES_FLAG_AD = 1 << 5, /*!< RFC 2065. Authentic Data bit indicates in a + * response that the data included has been verified by + * the server providing it */ + ARES_FLAG_CD = 1 << 6, /*!< RFC 2065. Checking Disabled bit indicates in a + * query that non-verified data is acceptable to the + * resolver sending the query. */ + } ares_dns_flags_t; + + /*! DNS Response Codes from server */ + typedef enum + { + ARES_RCODE_NOERROR = 0, /*!< Success */ + ARES_RCODE_FORMERR = 1, /*!< Format error. The name server was unable + * to interpret the query. */ + ARES_RCODE_SERVFAIL = 2, /*!< Server Failure. The name server was + * unable to process this query due to a + * problem with the nameserver */ + ARES_RCODE_NXDOMAIN = 3, /*!< Name Error. Meaningful only for + * responses from an authoritative name + * server, this code signifies that the + * domain name referenced in the query does + * not exist. */ + ARES_RCODE_NOTIMP = 4, /*!< Not implemented. The name server does + * not support the requested kind of + * query */ + ARES_RCODE_REFUSED = 5, /*!< Refused. The name server refuses to + * perform the specified operation for + * policy reasons. */ + ARES_RCODE_YXDOMAIN = 6, /*!< RFC 2136. Some name that ought not to + * exist, does exist. */ + ARES_RCODE_YXRRSET = 7, /*!< RFC 2136. Some RRset that ought to not + * exist, does exist. */ + ARES_RCODE_NXRRSET = 8, /*!< RFC 2136. Some RRset that ought to exist, + * does not exist. */ + ARES_RCODE_NOTAUTH = 9, /*!< RFC 2136. The server is not authoritative + * for the zone named in the Zone section. + */ + ARES_RCODE_NOTZONE = 10, /*!< RFC 2136. A name used in the Prerequisite + * or Update Section is not within the zone + * denoted by the Zone Section. */ + ARES_RCODE_DSOTYPEI = 11, /*!< RFC 8409. 
DSO-TYPE Not implemented */ + ARES_RCODE_BADSIG = 16, /*!< RFC 8945. TSIG Signature Failure */ + ARES_RCODE_BADKEY = 17, /*!< RFC 8945. Key not recognized. */ + ARES_RCODE_BADTIME = 18, /*!< RFC 8945. Signature out of time window. */ + ARES_RCODE_BADMODE = 19, /*!< RFC 2930. Bad TKEY Mode */ + ARES_RCODE_BADNAME = 20, /*!< RFC 2930. Duplicate Key Name */ + ARES_RCODE_BADALG = 21, /*!< RFC 2930. Algorithm not supported */ + ARES_RCODE_BADTRUNC = 22, /*!< RFC 8945. Bad Truncation */ + ARES_RCODE_BADCOOKIE = 23, /*!< RVC 7973. Bad/missing Server Cookie */ + } ares_dns_rcode_t; + + /*! Data types used */ + typedef enum + { + ARES_DATATYPE_INADDR = 1, /*!< struct in_addr * type */ + ARES_DATATYPE_INADDR6 = 2, /*!< struct ares_in6_addr * type */ + ARES_DATATYPE_U8 = 3, /*!< 8bit unsigned integer */ + ARES_DATATYPE_U16 = 4, /*!< 16bit unsigned integer */ + ARES_DATATYPE_U32 = 5, /*!< 32bit unsigned integer */ + ARES_DATATYPE_NAME = 6, /*!< Null-terminated string of a domain name */ + ARES_DATATYPE_STR = 7, /*!< Null-terminated string */ + ARES_DATATYPE_BIN = 8, /*!< Binary data */ + ARES_DATATYPE_BINP = 9, /*!< Officially defined as binary data, but likely + * printable. Guaranteed to have a NULL + * terminator for convenience (not included in + * length) */ + ARES_DATATYPE_OPT = 10, /*!< Array of options. 16bit identifier, BIN + * data. */ + } ares_dns_datatype_t; + + /*! Keys used for all RR Types. We take the record type and multiply by 100 + * to ensure we have a proper offset between keys so we can keep these sorted + */ + typedef enum + { + /*! A Record. Address. Datatype: INADDR */ + ARES_RR_A_ADDR = (ARES_REC_TYPE_A * 100) + 1, + /*! NS Record. Name. Datatype: NAME */ + ARES_RR_NS_NSDNAME = (ARES_REC_TYPE_NS * 100) + 1, + /*! CNAME Record. CName. Datatype: NAME */ + ARES_RR_CNAME_CNAME = (ARES_REC_TYPE_CNAME * 100) + 1, + /*! SOA Record. MNAME, Primary Source of Data. Datatype: NAME */ + ARES_RR_SOA_MNAME = (ARES_REC_TYPE_SOA * 100) + 1, + /*! SOA Record. 
RNAME, Mailbox of person responsible. Datatype: NAME */ + ARES_RR_SOA_RNAME = (ARES_REC_TYPE_SOA * 100) + 2, + /*! SOA Record. Serial, version. Datatype: U32 */ + ARES_RR_SOA_SERIAL = (ARES_REC_TYPE_SOA * 100) + 3, + /*! SOA Record. Refresh, zone refersh interval. Datatype: U32 */ + ARES_RR_SOA_REFRESH = (ARES_REC_TYPE_SOA * 100) + 4, + /*! SOA Record. Retry, failed refresh retry interval. Datatype: U32 */ + ARES_RR_SOA_RETRY = (ARES_REC_TYPE_SOA * 100) + 5, + /*! SOA Record. Expire, upper limit on authority. Datatype: U32 */ + ARES_RR_SOA_EXPIRE = (ARES_REC_TYPE_SOA * 100) + 6, + /*! SOA Record. Minimum, RR TTL. Datatype: U32 */ + ARES_RR_SOA_MINIMUM = (ARES_REC_TYPE_SOA * 100) + 7, + /*! PTR Record. DNAME, pointer domain. Datatype: NAME */ + ARES_RR_PTR_DNAME = (ARES_REC_TYPE_PTR * 100) + 1, + /*! HINFO Record. CPU. Datatype: STR */ + ARES_RR_HINFO_CPU = (ARES_REC_TYPE_HINFO * 100) + 1, + /*! HINFO Record. OS. Datatype: STR */ + ARES_RR_HINFO_OS = (ARES_REC_TYPE_HINFO * 100) + 2, + /*! MX Record. Preference. Datatype: U16 */ + ARES_RR_MX_PREFERENCE = (ARES_REC_TYPE_MX * 100) + 1, + /*! MX Record. Exchange, domain. Datatype: NAME */ + ARES_RR_MX_EXCHANGE = (ARES_REC_TYPE_MX * 100) + 2, + /*! TXT Record. Data. Datatype: BINP */ + ARES_RR_TXT_DATA = (ARES_REC_TYPE_TXT * 100) + 1, + /*! AAAA Record. Address. Datatype: INADDR6 */ + ARES_RR_AAAA_ADDR = (ARES_REC_TYPE_AAAA * 100) + 1, + /*! SRV Record. Priority. Datatype: U16 */ + ARES_RR_SRV_PRIORITY = (ARES_REC_TYPE_SRV * 100) + 2, + /*! SRV Record. Weight. Datatype: U16 */ + ARES_RR_SRV_WEIGHT = (ARES_REC_TYPE_SRV * 100) + 3, + /*! SRV Record. Port. Datatype: U16 */ + ARES_RR_SRV_PORT = (ARES_REC_TYPE_SRV * 100) + 4, + /*! SRV Record. Target domain. Datatype: NAME */ + ARES_RR_SRV_TARGET = (ARES_REC_TYPE_SRV * 100) + 5, + /*! NAPTR Record. Order. Datatype: U16 */ + ARES_RR_NAPTR_ORDER = (ARES_REC_TYPE_NAPTR * 100) + 1, + /*! NAPTR Record. Preference. 
Datatype: U16 */ + ARES_RR_NAPTR_PREFERENCE = (ARES_REC_TYPE_NAPTR * 100) + 2, + /*! NAPTR Record. Flags. Datatype: STR */ + ARES_RR_NAPTR_FLAGS = (ARES_REC_TYPE_NAPTR * 100) + 3, + /*! NAPTR Record. Services. Datatype: STR */ + ARES_RR_NAPTR_SERVICES = (ARES_REC_TYPE_NAPTR * 100) + 4, + /*! NAPTR Record. Regexp. Datatype: STR */ + ARES_RR_NAPTR_REGEXP = (ARES_REC_TYPE_NAPTR * 100) + 5, + /*! NAPTR Record. Replacement. Datatype: NAME */ + ARES_RR_NAPTR_REPLACEMENT = (ARES_REC_TYPE_NAPTR * 100) + 6, + /*! OPT Record. UDP Size. Datatype: U16 */ + ARES_RR_OPT_UDP_SIZE = (ARES_REC_TYPE_OPT * 100) + 1, + /*! OPT Record. Version. Datatype: U8 */ + ARES_RR_OPT_VERSION = (ARES_REC_TYPE_OPT * 100) + 3, + /*! OPT Record. Flags. Datatype: U16 */ + ARES_RR_OPT_FLAGS = (ARES_REC_TYPE_OPT * 100) + 4, + /*! OPT Record. Options. Datatype: OPT */ + ARES_RR_OPT_OPTIONS = (ARES_REC_TYPE_OPT * 100) + 5, + /*! TLSA Record. Certificate Usage. Datatype: U8 */ + ARES_RR_TLSA_CERT_USAGE = (ARES_REC_TYPE_TLSA * 100) + 1, + /*! TLSA Record. Selector. Datatype: U8 */ + ARES_RR_TLSA_SELECTOR = (ARES_REC_TYPE_TLSA * 100) + 2, + /*! TLSA Record. Matching Type. Datatype: U8 */ + ARES_RR_TLSA_MATCH = (ARES_REC_TYPE_TLSA * 100) + 3, + /*! TLSA Record. Certificate Association Data. Datatype: BIN */ + ARES_RR_TLSA_DATA = (ARES_REC_TYPE_TLSA * 100) + 4, + /*! SVCB Record. SvcPriority. Datatype: U16 */ + ARES_RR_SVCB_PRIORITY = (ARES_REC_TYPE_SVCB * 100) + 1, + /*! SVCB Record. TargetName. Datatype: NAME */ + ARES_RR_SVCB_TARGET = (ARES_REC_TYPE_SVCB * 100) + 2, + /*! SVCB Record. SvcParams. Datatype: OPT */ + ARES_RR_SVCB_PARAMS = (ARES_REC_TYPE_SVCB * 100) + 3, + /*! HTTPS Record. SvcPriority. Datatype: U16 */ + ARES_RR_HTTPS_PRIORITY = (ARES_REC_TYPE_HTTPS * 100) + 1, + /*! HTTPS Record. TargetName. Datatype: NAME */ + ARES_RR_HTTPS_TARGET = (ARES_REC_TYPE_HTTPS * 100) + 2, + /*! HTTPS Record. SvcParams. Datatype: OPT */ + ARES_RR_HTTPS_PARAMS = (ARES_REC_TYPE_HTTPS * 100) + 3, + /*! URI Record. 
Priority. Datatype: U16 */ + ARES_RR_URI_PRIORITY = (ARES_REC_TYPE_URI * 100) + 1, + /*! URI Record. Weight. Datatype: U16 */ + ARES_RR_URI_WEIGHT = (ARES_REC_TYPE_URI * 100) + 2, + /*! URI Record. Target domain. Datatype: NAME */ + ARES_RR_URI_TARGET = (ARES_REC_TYPE_URI * 100) + 3, + /*! CAA Record. Critical flag. Datatype: U8 */ + ARES_RR_CAA_CRITICAL = (ARES_REC_TYPE_CAA * 100) + 1, + /*! CAA Record. Tag/Property. Datatype: STR */ + ARES_RR_CAA_TAG = (ARES_REC_TYPE_CAA * 100) + 2, + /*! CAA Record. Value. Datatype: BINP */ + ARES_RR_CAA_VALUE = (ARES_REC_TYPE_CAA * 100) + 3, + /*! RAW Record. RR Type. Datatype: U16 */ + ARES_RR_RAW_RR_TYPE = (ARES_REC_TYPE_RAW_RR * 100) + 1, + /*! RAW Record. RR Data. Datatype: BIN */ + ARES_RR_RAW_RR_DATA = (ARES_REC_TYPE_RAW_RR * 100) + 2, + } ares_dns_rr_key_t; + + /*! TLSA Record ARES_RR_TLSA_CERT_USAGE known values */ + typedef enum + { + /*! Certificate Usage 0. CA Constraint. */ + ARES_TLSA_USAGE_CA = 0, + /*! Certificate Usage 1. Service Certificate Constraint. */ + ARES_TLSA_USAGE_SERVICE = 1, + /*! Certificate Usage 2. Trust Anchor Assertion. */ + ARES_TLSA_USAGE_TRUSTANCHOR = 2, + /*! Certificate Usage 3. Domain-issued certificate. */ + ARES_TLSA_USAGE_DOMAIN = 3 + } ares_tlsa_usage_t; + + /*! TLSA Record ARES_RR_TLSA_SELECTOR known values */ + typedef enum + { + /*! Full Certificate */ + ARES_TLSA_SELECTOR_FULL = 0, + /*! DER-encoded SubjectPublicKeyInfo */ + ARES_TLSA_SELECTOR_SUBJPUBKEYINFO = 1 + } ares_tlsa_selector_t; + + /*! TLSA Record ARES_RR_TLSA_MATCH known values */ + typedef enum + { + /*! Exact match */ + ARES_TLSA_MATCH_EXACT = 0, + /*! Sha256 match */ + ARES_TLSA_MATCH_SHA256 = 1, + /*! Sha512 match */ + ARES_TLSA_MATCH_SHA512 = 2 + } ares_tlsa_match_t; + + /*! SVCB (and HTTPS) RR known parameters */ + typedef enum + { + /*! Mandatory keys in this RR (RFC 9460 Section 8) */ + ARES_SVCB_PARAM_MANDATORY = 0, + /*! Additional supported protocols (RFC 9460 Section 7.1) */ + ARES_SVCB_PARAM_ALPN = 1, + /*! 
No support for default protocol (RFC 9460 Section 7.1) */ + ARES_SVCB_PARAM_NO_DEFAULT_ALPN = 2, + /*! Port for alternative endpoint (RFC 9460 Section 7.2) */ + ARES_SVCB_PARAM_PORT = 3, + /*! IPv4 address hints (RFC 9460 Section 7.3) */ + ARES_SVCB_PARAM_IPV4HINT = 4, + /*! RESERVED (held for Encrypted ClientHello) */ + ARES_SVCB_PARAM_ECH = 5, + /*! IPv6 address hints (RFC 9460 Section 7.3) */ + ARES_SVCB_PARAM_IPV6HINT = 6 + } ares_svcb_param_t; + + /*! OPT RR known parameters */ + typedef enum + { + /*! RFC 8764. Apple's DNS Long-Lived Queries Protocol */ + ARES_OPT_PARAM_LLQ = 1, + /*! http://files.dns-sd.org/draft-sekar-dns-ul.txt: Update Lease */ + ARES_OPT_PARAM_UL = 2, + /*! RFC 5001. Name Server Identification */ + ARES_OPT_PARAM_NSID = 3, + /*! RFC 6975. DNSSEC Algorithm Understood */ + ARES_OPT_PARAM_DAU = 5, + /*! RFC 6975. DS Hash Understood */ + ARES_OPT_PARAM_DHU = 6, + /*! RFC 6975. NSEC3 Hash Understood */ + ARES_OPT_PARAM_N3U = 7, + /*! RFC 7871. Client Subnet */ + ARES_OPT_PARAM_EDNS_CLIENT_SUBNET = 8, + /*! RFC 7314. Expire Timer */ + ARES_OPT_PARAM_EDNS_EXPIRE = 9, + /*! RFC 7873. Client and Server Cookies */ + ARES_OPT_PARAM_COOKIE = 10, + /*! RFC 7828. TCP Keepalive timeout */ + ARES_OPT_PARAM_EDNS_TCP_KEEPALIVE = 11, + /*! RFC 7830. Padding */ + ARES_OPT_PARAM_PADDING = 12, + /*! RFC 7901. Chain query requests */ + ARES_OPT_PARAM_CHAIN = 13, + /*! RFC 8145. Signaling Trust Anchor Knowledge in DNSSEC */ + ARES_OPT_PARAM_EDNS_KEY_TAG = 14, + /*! RFC 8914. Extended ERROR code and message */ + ARES_OPT_PARAM_EXTENDED_DNS_ERROR = 15, + } ares_opt_param_t; + + /*! Data type for option records for keys like ARES_RR_OPT_OPTIONS and + * ARES_RR_HTTPS_PARAMS returned by ares_dns_opt_get_datatype() */ + typedef enum + { + /*! No value allowed for this option */ + ARES_OPT_DATATYPE_NONE = 1, + /*! List of strings, each prefixed with a single octet representing the length + */ + ARES_OPT_DATATYPE_STR_LIST = 2, + /*! 
List of 8bit integers, concatenated */ + ARES_OPT_DATATYPE_U8_LIST = 3, + /*! 16bit integer in network byte order */ + ARES_OPT_DATATYPE_U16 = 4, + /*! list of 16bit integer in network byte order, concatenated. */ + ARES_OPT_DATATYPE_U16_LIST = 5, + /*! 32bit integer in network byte order */ + ARES_OPT_DATATYPE_U32 = 6, + /*! list 32bit integer in network byte order, concatenated */ + ARES_OPT_DATATYPE_U32_LIST = 7, + /*! List of ipv4 addresses in network byte order, concatenated */ + ARES_OPT_DATATYPE_INADDR4_LIST = 8, + /*! List of ipv6 addresses in network byte order, concatenated */ + ARES_OPT_DATATYPE_INADDR6_LIST = 9, + /*! Binary Data */ + ARES_OPT_DATATYPE_BIN = 10, + /*! DNS Domain Name Format */ + ARES_OPT_DATATYPE_NAME = 11 + } ares_dns_opt_datatype_t; + + /*! Data type for flags to ares_dns_parse() */ + typedef enum + { + /*! Parse Answers from RFC 1035 that allow name compression as RAW */ + ARES_DNS_PARSE_AN_BASE_RAW = 1 << 0, + /*! Parse Authority from RFC 1035 that allow name compression as RAW */ + ARES_DNS_PARSE_NS_BASE_RAW = 1 << 1, + /*! Parse Additional from RFC 1035 that allow name compression as RAW */ + ARES_DNS_PARSE_AR_BASE_RAW = 1 << 2, + /*! Parse Answers from later RFCs (no name compression) RAW */ + ARES_DNS_PARSE_AN_EXT_RAW = 1 << 3, + /*! Parse Authority from later RFCs (no name compression) as RAW */ + ARES_DNS_PARSE_NS_EXT_RAW = 1 << 4, + /*< Parse Additional from later RFCs (no name compression) as RAW */ + ARES_DNS_PARSE_AR_EXT_RAW = 1 << 5 + } ares_dns_parse_flags_t; + + /*! String representation of DNS Record Type + * + * \param[in] type DNS Record Type + * \return string + */ + CARES_EXTERN const char* ares_dns_rec_type_tostr(ares_dns_rec_type_t type); + + /*! String representation of DNS Class + * + * \param[in] qclass DNS Class + * \return string + */ + CARES_EXTERN const char* ares_dns_class_tostr(ares_dns_class_t qclass); + + /*! 
String representation of DNS OpCode + * + * \param[in] opcode DNS OpCode + * \return string + */ + CARES_EXTERN const char* ares_dns_opcode_tostr(ares_dns_opcode_t opcode); + + /*! String representation of DNS Resource Record Parameter + * + * \param[in] key DNS Resource Record parameter + * \return string + */ + CARES_EXTERN const char* ares_dns_rr_key_tostr(ares_dns_rr_key_t key); + + /*! String representation of DNS Resource Record section + * + * \param[in] section Section + * \return string + */ + CARES_EXTERN const char* ares_dns_section_tostr(ares_dns_section_t section); + + /*! Convert DNS class name as string to ares_dns_class_t + * + * \param[out] qclass Pointer passed by reference to write class + * \param[in] str String to convert + * \return ARES_TRUE on success + */ + CARES_EXTERN ares_bool_t ares_dns_class_fromstr(ares_dns_class_t* qclass, const char* str); + + /*! Convert DNS record type as string to ares_dns_rec_type_t + * + * \param[out] qtype Pointer passed by reference to write record type + * \param[in] str String to convert + * \return ARES_TRUE on success + */ + CARES_EXTERN ares_bool_t ares_dns_rec_type_fromstr(ares_dns_rec_type_t* qtype, const char* str); + + /*! Convert DNS response code as string to from ares_dns_rcode_t + * + * \param[in] rcode Response code to convert + * \return ARES_TRUE on success + */ + CARES_EXTERN const char* ares_dns_rcode_tostr(ares_dns_rcode_t rcode); + + /*! Convert any valid ip address (ipv4 or ipv6) into struct ares_addr and + * return the starting pointer of the network byte order address and the + * length of the address (4 or 16). + * + * \param[in] ipaddr ASCII string form of the ip address + * \param[in,out] addr Must set "family" member to one of AF_UNSPEC, + * AF_INET, AF_INET6 on input. + * \param[out] ptr_len Length of binary form address + * \return Pointer to start of binary address or NULL on error. 
+ */ + CARES_EXTERN const void* ares_dns_pton(const char* ipaddr, struct ares_addr* addr, size_t* out_len); + + /*! Convert an ip address into the PTR format for in-addr.arpa or in6.arpa + * + * \param[in] addr properly filled address structure + * \return String representing PTR, use ares_free_string() to free + */ + CARES_EXTERN char* ares_dns_addr_to_ptr(const struct ares_addr* addr); + + /*! The options/parameters extensions to some RRs can be somewhat opaque, this + * is a helper to return the best match for a datatype for interpreting the + * option record. + * + * \param[in] key Key associated with options/parameters + * \param[in] opt Option Key/Parameter + * \return Datatype + */ + CARES_EXTERN ares_dns_opt_datatype_t + ares_dns_opt_get_datatype(ares_dns_rr_key_t key, unsigned short opt); + + /*! The options/parameters extensions to some RRs can be somewhat opaque, this + * is a helper to return the name if the option is known. + * + * \param[in] key Key associated with options/parameters + * \param[in] opt Option Key/Parameter + * \return name, or NULL if not known. + */ + CARES_EXTERN const char* ares_dns_opt_get_name(ares_dns_rr_key_t key, unsigned short opt); + + /*! Retrieve a list of Resource Record keys that can be set or retrieved for + * the Resource record type. + * + * \param[in] type Record Type + * \param[out] cnt Number of keys returned + * \return array of keys associated with Resource Record + */ + CARES_EXTERN const ares_dns_rr_key_t* + ares_dns_rr_get_keys(ares_dns_rec_type_t type, size_t* cnt); + + /*! Retrieve the datatype associated with a Resource Record key. + * + * \param[in] key Resource Record Key + * \return datatype + */ + CARES_EXTERN ares_dns_datatype_t + ares_dns_rr_key_datatype(ares_dns_rr_key_t key); + + /*! Retrieve the DNS Resource Record type associated with a Resource Record key. 
+ * + * \param[in] key Resource Record Key + * \return DNS Resource Record Type + */ + CARES_EXTERN ares_dns_rec_type_t + ares_dns_rr_key_to_rec_type(ares_dns_rr_key_t key); + + /*! Opaque data type representing a DNS RR (Resource Record) */ + struct ares_dns_rr; + + /*! Typedef for opaque data type representing a DNS RR (Resource Record) */ + typedef struct ares_dns_rr ares_dns_rr_t; + + /*! Opaque data type representing a DNS Query Data QD Packet */ + struct ares_dns_qd; + + /*! Typedef for opaque data type representing a DNS Query Data QD Packet */ + typedef struct ares_dns_qd ares_dns_qd_t; + + /*! Opaque data type representing a DNS Packet */ + struct ares_dns_record; + + /*! Typedef for opaque data type representing a DNS Packet */ + typedef struct ares_dns_record ares_dns_record_t; + + /*! Create a new DNS record object + * + * \param[out] dnsrec Pointer passed by reference for a newly allocated + * record object. Must be ares_dns_record_destroy()'d by + * caller. + * \param[in] id DNS Query ID. If structuring a new query to be sent + * with ares_send(), this value should be zero. + * \param[in] flags DNS Flags from \ares_dns_flags_t + * \param[in] opcode DNS OpCode (typically ARES_OPCODE_QUERY) + * \param[in] rcode DNS RCode + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_record_create(ares_dns_record_t** dnsrec, unsigned short id, unsigned short flags, ares_dns_opcode_t opcode, ares_dns_rcode_t rcode); + + /*! Destroy a DNS record object + * + * \param[in] dnsrec Initialized record object + */ + CARES_EXTERN void ares_dns_record_destroy(ares_dns_record_t* dnsrec); + + /*! Get the DNS Query ID + * + * \param[in] dnsrec Initialized record object + * \return DNS query id + */ + CARES_EXTERN unsigned short + ares_dns_record_get_id(const ares_dns_record_t* dnsrec); + + /*! 
Get the DNS Record Flags + * + * \param[in] dnsrec Initialized record object + * \return One or more \ares_dns_flags_t + */ + CARES_EXTERN unsigned short + ares_dns_record_get_flags(const ares_dns_record_t* dnsrec); + + /*! Get the DNS Record OpCode + * + * \param[in] dnsrec Initialized record object + * \return opcode + */ + CARES_EXTERN ares_dns_opcode_t + ares_dns_record_get_opcode(const ares_dns_record_t* dnsrec); + + /*! Get the DNS Record RCode + * + * \param[in] dnsrec Initialized record object + * \return rcode + */ + CARES_EXTERN ares_dns_rcode_t + ares_dns_record_get_rcode(const ares_dns_record_t* dnsrec); + + /*! Add a query to the DNS Record. Typically a record will have only 1 + * query. Most DNS servers will reject queries with more than 1 question. + * + * \param[in] dnsrec Initialized record object + * \param[in] name Name/Hostname of request + * \param[in] qtype Type of query + * \param[in] qclass Class of query (typically ARES_CLASS_IN) + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_record_query_add(ares_dns_record_t* dnsrec, const char* name, ares_dns_rec_type_t qtype, ares_dns_class_t qclass); + + /*! Get the count of queries in the DNS Record + * + * \param[in] dnsrec Initialized record object + * \return count of queries + */ + CARES_EXTERN size_t ares_dns_record_query_cnt(const ares_dns_record_t* dnsrec); + + /*! Get the data about the query at the provided index. + * + * \param[in] dnsrec Initialized record object + * \param[in] idx Index of query + * \param[out] name Optional. Returns name, may pass NULL if not desired. + * \param[out] qtype Optional. Returns record type, may pass NULL. + * \param[out] qclass Optional. Returns class, may pass NULL. + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_record_query_get( + const ares_dns_record_t* dnsrec, size_t idx, const char** name, ares_dns_rec_type_t* qtype, ares_dns_class_t* qclass + ); + + /*! 
Get the count of Resource Records in the provided section + * + * \param[in] dnsrec Initialized record object + * \param[in] sect Section. ARES_SECTION_ANSWER is most used. + * \return count of resource records. + */ + CARES_EXTERN size_t ares_dns_record_rr_cnt(const ares_dns_record_t* dnsrec, ares_dns_section_t sect); + + /*! Add a Resource Record to the DNS Record. + * + * \param[out] rr_out Pointer to created resource record. This pointer + * is owned by the DNS record itself, this is just made + * available to facilitate adding RR-specific fields. + * \param[in] dnsrec Initialized record object + * \param[in] sect Section to add resource record to + * \param[in] name Resource Record name/hostname + * \param[in] type Record Type + * \param[in] rclass Class + * \param[in] ttl TTL + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_record_rr_add( + ares_dns_rr_t** rr_out, ares_dns_record_t* dnsrec, ares_dns_section_t sect, const char* name, ares_dns_rec_type_t type, ares_dns_class_t rclass, unsigned int ttl + ); + + /*! Fetch a resource record based on the section and index. + * + * \param[in] dnsrec Initialized record object + * \param[in] sect Section for resource record + * \param[in] idx Index of resource record in section + * \return NULL on misuse, otherwise a pointer to the resource record + */ + CARES_EXTERN ares_dns_rr_t* ares_dns_record_rr_get(ares_dns_record_t* dnsrec, ares_dns_section_t sect, size_t idx); + + /*! Remove the resource record based on the section and index + * + * \param[in] dnsrec Initialized record object + * \param[in] sect Section for resource record + * \param[in] idx Index of resource record in section + * \return ARES_SUCCESS on success, otherwise an error code. + */ + CARES_EXTERN ares_status_t ares_dns_record_rr_del(ares_dns_record_t* dnsrec, ares_dns_section_t sect, size_t idx); + + /*! 
Retrieve the resource record Name/Hostname + * + * \param[in] rr Pointer to resource record + * \return Name + */ + CARES_EXTERN const char* ares_dns_rr_get_name(const ares_dns_rr_t* rr); + + /*! Retrieve the resource record type + * + * \param[in] rr Pointer to resource record + * \return type + */ + CARES_EXTERN ares_dns_rec_type_t ares_dns_rr_get_type(const ares_dns_rr_t* rr); + + /*! Retrieve the resource record class + * + * \param[in] rr Pointer to resource record + * \return class + */ + CARES_EXTERN ares_dns_class_t ares_dns_rr_get_class(const ares_dns_rr_t* rr); + + /*! Retrieve the resource record TTL + * + * \param[in] rr Pointer to resource record + * \return TTL + */ + CARES_EXTERN unsigned int ares_dns_rr_get_ttl(const ares_dns_rr_t* rr); + + /*! Set ipv4 address data type for specified resource record and key. Can + * only be used on keys with datatype ARES_DATATYPE_INADDR + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] addr Pointer to ipv4 address to use. + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_rr_set_addr(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, const struct in_addr* addr); + + /*! Set ipv6 address data type for specified resource record and key. Can + * only be used on keys with datatype ARES_DATATYPE_INADDR6 + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] addr Pointer to ipv6 address to use. + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t + ares_dns_rr_set_addr6(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, const struct ares_in6_addr* addr); + + /*! Set string data for specified resource record and key. Can + * only be used on keys with datatype ARES_DATATYPE_STR or ARES_DATATYPE_NAME. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] val Pointer to string to set. 
+ * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_rr_set_str(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, const char* val); + + /*! Set 8bit unsigned integer for specified resource record and key. Can + * only be used on keys with datatype ARES_DATATYPE_U8 + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] val 8bit unsigned integer + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_rr_set_u8(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, unsigned char val); + + /*! Set 16bit unsigned integer for specified resource record and key. Can + * only be used on keys with datatype ARES_DATATYPE_U16 + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] val 16bit unsigned integer + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_rr_set_u16(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, unsigned short val); + + /*! Set 32bit unsigned integer for specified resource record and key. Can + * only be used on keys with datatype ARES_DATATYPE_U32 + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] val 32bit unsigned integer + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_rr_set_u32(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, unsigned int val); + + /*! Set binary (BIN or BINP) data for specified resource record and key. Can + * only be used on keys with datatype ARES_DATATYPE_BIN or ARES_DATATYPE_BINP. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] val Pointer to binary data. + * \param[in] len Length of binary data + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_rr_set_bin(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, const unsigned char* val, size_t len); + + /*! 
Set the option for the RR + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] opt Option record key id. + * \param[out] val Optional. Value to associate with option. + * \param[out] val_len Length of value passed. + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_rr_set_opt(ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, unsigned short opt, const unsigned char* val, size_t val_len); + + /*! Retrieve a pointer to the ipv4 address. Can only be used on keys with + * datatype ARES_DATATYPE_INADDR. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \return pointer to ipv4 address or NULL on error + */ + CARES_EXTERN const struct in_addr* + ares_dns_rr_get_addr(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key); + + /*! Retrieve a pointer to the ipv6 address. Can only be used on keys with + * datatype ARES_DATATYPE_INADDR6. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \return pointer to ipv6 address or NULL on error + */ + CARES_EXTERN const struct ares_in6_addr* + ares_dns_rr_get_addr6(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key); + + /*! Retrieve a pointer to the string. Can only be used on keys with + * datatype ARES_DATATYPE_STR and ARES_DATATYPE_NAME. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \return pointer string or NULL on error + */ + CARES_EXTERN const char* ares_dns_rr_get_str(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key); + + /*! Retrieve an 8bit unsigned integer. Can only be used on keys with + * datatype ARES_DATATYPE_U8. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \return 8bit unsigned integer + */ + CARES_EXTERN unsigned char ares_dns_rr_get_u8(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key); + + /*! Retrieve an 16bit unsigned integer. 
Can only be used on keys with + * datatype ARES_DATATYPE_U16. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \return 16bit unsigned integer + */ + CARES_EXTERN unsigned short ares_dns_rr_get_u16(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key); + + /*! Retrieve an 32bit unsigned integer. Can only be used on keys with + * datatype ARES_DATATYPE_U32. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \return 32bit unsigned integer + */ + CARES_EXTERN unsigned int ares_dns_rr_get_u32(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key); + + /*! Retrieve a pointer to the binary data. Can only be used on keys with + * datatype ARES_DATATYPE_BIN or ARES_DATATYPE_BINP. If BINP, the data is + * guaranteed to have a NULL terminator which is NOT included in the length. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[out] len Length of binary data returned + * \return pointer binary data or NULL on error + */ + CARES_EXTERN const unsigned char* + ares_dns_rr_get_bin(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, size_t* len); + + /*! Retrieve the number of options stored for the RR. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \return count, or 0 if none. + */ + CARES_EXTERN size_t ares_dns_rr_get_opt_cnt(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key); + + /*! Retrieve the option for the RR by index. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] idx Index of option record + * \param[out] val Optional. Pointer passed by reference to hold value. + * Options may not have values. Value if returned is + * guaranteed to be NULL terminated, however in most + * cases it is not printable. + * \param[out] val_len Optional. Pointer passed by reference to hold value + * length. 
+ * \return option key/id on success, 65535 on misuse. + */ + CARES_EXTERN unsigned short + ares_dns_rr_get_opt(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, size_t idx, const unsigned char** val, size_t* val_len); + + /*! Retrieve the option for the RR by the option key/id. + * + * \param[in] dns_rr Pointer to resource record + * \param[in] key DNS Resource Record Key + * \param[in] opt Option record key id (this is not the index). + * \param[out] val Optional. Pointer passed by reference to hold value. + * Options may not have values. Value if returned is + * guaranteed to be NULL terminated, however in most cases + * it is not printable. + * \param[out] val_len Optional. Pointer passed by reference to hold value + * length. + * \return ARES_TRUE on success, ARES_FALSE on misuse. + */ + CARES_EXTERN ares_bool_t ares_dns_rr_get_opt_byid(const ares_dns_rr_t* dns_rr, ares_dns_rr_key_t key, unsigned short opt, const unsigned char** val, size_t* val_len); + + /*! Parse a complete DNS message. + * + * \param[in] buf pointer to bytes to be parsed + * \param[in] buf_len Length of buf provided + * \param[in] flags Flags dictating how the message should be parsed. + * \param[out] dnsrec Pointer passed by reference for a new DNS record object + * that must be ares_dns_record_destroy()'d by caller. + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_parse(const unsigned char* buf, size_t buf_len, unsigned int flags, ares_dns_record_t** dnsrec); + + /*! Write a complete DNS message + * + * \param[in] dnsrec Pointer to initialized and filled DNS record object. + * \param[out] buf Pointer passed by reference to be filled in with with + * DNS message. Must be ares_free()'d by caller. + * \param[out] buf_len Length of returned buffer containing DNS message. + * \return ARES_SUCCESS on success + */ + CARES_EXTERN ares_status_t ares_dns_write(ares_dns_record_t* dnsrec, unsigned char** buf, size_t* buf_len); + /*! 
@} */ + +#ifdef __cplusplus +} +#endif + +#endif /* __ARES_DNS_RECORD_H */ diff --git a/CAPI/cpp/grpc/include/ares_nameser.h b/CAPI/cpp/grpc/include/ares_nameser.h new file mode 100644 index 00000000..a7f0b4d1 --- /dev/null +++ b/CAPI/cpp/grpc/include/ares_nameser.h @@ -0,0 +1,508 @@ +/* MIT License + * + * Copyright (c) Massachusetts Institute of Technology + * Copyright (c) Daniel Stenberg + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * SPDX-License-Identifier: MIT + */ + +#ifndef ARES_NAMESER_H +#define ARES_NAMESER_H + +#include "ares_build.h" + +#ifdef CARES_HAVE_ARPA_NAMESER_H +#include +#endif +#ifdef CARES_HAVE_ARPA_NAMESER_COMPAT_H +#include +#endif + +/* ============================================================================ + * arpa/nameser.h may or may not provide ALL of the below defines, so check + * each one individually and set if not + * ============================================================================ + */ + +#ifndef NS_PACKETSZ +#define NS_PACKETSZ 512 /* maximum packet size */ +#endif + +#ifndef NS_MAXDNAME +#define NS_MAXDNAME 256 /* maximum domain name */ +#endif + +#ifndef NS_MAXCDNAME +#define NS_MAXCDNAME 255 /* maximum compressed domain name */ +#endif + +#ifndef NS_MAXLABEL +#define NS_MAXLABEL 63 +#endif + +#ifndef NS_HFIXEDSZ +#define NS_HFIXEDSZ 12 /* #/bytes of fixed data in header */ +#endif + +#ifndef NS_QFIXEDSZ +#define NS_QFIXEDSZ 4 /* #/bytes of fixed data in query */ +#endif + +#ifndef NS_RRFIXEDSZ +#define NS_RRFIXEDSZ 10 /* #/bytes of fixed data in r record */ +#endif + +#ifndef NS_INT16SZ +#define NS_INT16SZ 2 +#endif + +#ifndef NS_INADDRSZ +#define NS_INADDRSZ 4 +#endif + +#ifndef NS_IN6ADDRSZ +#define NS_IN6ADDRSZ 16 +#endif + +#ifndef NS_CMPRSFLGS +#define NS_CMPRSFLGS 0xc0 /* Flag bits indicating name compression. */ +#endif + +#ifndef NS_DEFAULTPORT +#define NS_DEFAULTPORT 53 /* For both TCP and UDP. */ +#endif + +/* ============================================================================ + * arpa/nameser.h should provide these enumerations always, so if not found, + * provide them + * ============================================================================ + */ +#ifndef CARES_HAVE_ARPA_NAMESER_H + +typedef enum __ns_class +{ + ns_c_invalid = 0, /* Cookie. */ + ns_c_in = 1, /* Internet. */ + ns_c_2 = 2, /* unallocated/unsupported. */ + ns_c_chaos = 3, /* MIT Chaos-net. */ + ns_c_hs = 4, /* MIT Hesiod. 
*/ + /* Query class values which do not appear in resource records */ + ns_c_none = 254, /* for prereq. sections in update requests */ + ns_c_any = 255, /* Wildcard match. */ + ns_c_max = 65536 +} ns_class; + +typedef enum __ns_type +{ + ns_t_invalid = 0, /* Cookie. */ + ns_t_a = 1, /* Host address. */ + ns_t_ns = 2, /* Authoritative server. */ + ns_t_md = 3, /* Mail destination. */ + ns_t_mf = 4, /* Mail forwarder. */ + ns_t_cname = 5, /* Canonical name. */ + ns_t_soa = 6, /* Start of authority zone. */ + ns_t_mb = 7, /* Mailbox domain name. */ + ns_t_mg = 8, /* Mail group member. */ + ns_t_mr = 9, /* Mail rename name. */ + ns_t_null = 10, /* Null resource record. */ + ns_t_wks = 11, /* Well known service. */ + ns_t_ptr = 12, /* Domain name pointer. */ + ns_t_hinfo = 13, /* Host information. */ + ns_t_minfo = 14, /* Mailbox information. */ + ns_t_mx = 15, /* Mail routing information. */ + ns_t_txt = 16, /* Text strings. */ + ns_t_rp = 17, /* Responsible person. */ + ns_t_afsdb = 18, /* AFS cell database. */ + ns_t_x25 = 19, /* X_25 calling address. */ + ns_t_isdn = 20, /* ISDN calling address. */ + ns_t_rt = 21, /* Router. */ + ns_t_nsap = 22, /* NSAP address. */ + ns_t_nsap_ptr = 23, /* Reverse NSAP lookup (deprecated). */ + ns_t_sig = 24, /* Security signature. */ + ns_t_key = 25, /* Security key. */ + ns_t_px = 26, /* X.400 mail mapping. */ + ns_t_gpos = 27, /* Geographical position (withdrawn). */ + ns_t_aaaa = 28, /* Ip6 Address. */ + ns_t_loc = 29, /* Location Information. */ + ns_t_nxt = 30, /* Next domain (security). */ + ns_t_eid = 31, /* Endpoint identifier. */ + ns_t_nimloc = 32, /* Nimrod Locator. */ + ns_t_srv = 33, /* Server Selection. 
*/ + ns_t_atma = 34, /* ATM Address */ + ns_t_naptr = 35, /* Naming Authority PoinTeR */ + ns_t_kx = 36, /* Key Exchange */ + ns_t_cert = 37, /* Certification record */ + ns_t_a6 = 38, /* IPv6 address (deprecates AAAA) */ + ns_t_dname = 39, /* Non-terminal DNAME (for IPv6) */ + ns_t_sink = 40, /* Kitchen sink (experimental) */ + ns_t_opt = 41, /* EDNS0 option (meta-RR) */ + ns_t_apl = 42, /* Address prefix list (RFC3123) */ + ns_t_ds = 43, /* Delegation Signer (RFC4034) */ + ns_t_sshfp = 44, /* SSH Key Fingerprint (RFC4255) */ + ns_t_rrsig = 46, /* Resource Record Signature (RFC4034) */ + ns_t_nsec = 47, /* Next Secure (RFC4034) */ + ns_t_dnskey = 48, /* DNS Public Key (RFC4034) */ + ns_t_tkey = 249, /* Transaction key */ + ns_t_tsig = 250, /* Transaction signature. */ + ns_t_ixfr = 251, /* Incremental zone transfer. */ + ns_t_axfr = 252, /* Transfer zone of authority. */ + ns_t_mailb = 253, /* Transfer mailbox records. */ + ns_t_maila = 254, /* Transfer mail agent records. */ + ns_t_any = 255, /* Wildcard match. */ + ns_t_uri = 256, /* Uniform Resource Identifier (RFC7553) */ + ns_t_caa = 257, /* Certification Authority Authorization. */ + ns_t_max = 65536 +} ns_type; + +typedef enum __ns_opcode +{ + ns_o_query = 0, /* Standard query. */ + ns_o_iquery = 1, /* Inverse query (deprecated/unsupported). */ + ns_o_status = 2, /* Name server status query (unsupported). */ + /* Opcode 3 is undefined/reserved. */ + ns_o_notify = 4, /* Zone change notification. */ + ns_o_update = 5, /* Zone update message. */ + ns_o_max = 6 +} ns_opcode; + +typedef enum __ns_rcode +{ + ns_r_noerror = 0, /* No error occurred. */ + ns_r_formerr = 1, /* Format error. */ + ns_r_servfail = 2, /* Server failure. */ + ns_r_nxdomain = 3, /* Name error. */ + ns_r_notimpl = 4, /* Unimplemented. */ + ns_r_refused = 5, /* Operation refused. 
*/ + /* these are for BIND_UPDATE */ + ns_r_yxdomain = 6, /* Name exists */ + ns_r_yxrrset = 7, /* RRset exists */ + ns_r_nxrrset = 8, /* RRset does not exist */ + ns_r_notauth = 9, /* Not authoritative for zone */ + ns_r_notzone = 10, /* Zone of record different from zone section */ + ns_r_max = 11, + /* The following are TSIG extended errors */ + ns_r_badsig = 16, + ns_r_badkey = 17, + ns_r_badtime = 18 +} ns_rcode; + +#endif /* CARES_HAVE_ARPA_NAMESER_H */ + +/* ============================================================================ + * arpa/nameser_compat.h typically sets these. However on some systems + * arpa/nameser.h does, but may not set all of them. Lets conditionally + * define each + * ============================================================================ + */ + +#ifndef PACKETSZ +#define PACKETSZ NS_PACKETSZ +#endif + +#ifndef MAXDNAME +#define MAXDNAME NS_MAXDNAME +#endif + +#ifndef MAXCDNAME +#define MAXCDNAME NS_MAXCDNAME +#endif + +#ifndef MAXLABEL +#define MAXLABEL NS_MAXLABEL +#endif + +#ifndef HFIXEDSZ +#define HFIXEDSZ NS_HFIXEDSZ +#endif + +#ifndef QFIXEDSZ +#define QFIXEDSZ NS_QFIXEDSZ +#endif + +#ifndef RRFIXEDSZ +#define RRFIXEDSZ NS_RRFIXEDSZ +#endif + +#ifndef INDIR_MASK +#define INDIR_MASK NS_CMPRSFLGS +#endif + +#ifndef NAMESERVER_PORT +#define NAMESERVER_PORT NS_DEFAULTPORT +#endif + +/* opcodes */ +#ifndef O_QUERY +#define O_QUERY 0 /* ns_o_query */ +#endif +#ifndef O_IQUERY +#define O_IQUERY 1 /* ns_o_iquery */ +#endif +#ifndef O_STATUS +#define O_STATUS 2 /* ns_o_status */ +#endif +#ifndef O_NOTIFY +#define O_NOTIFY 4 /* ns_o_notify */ +#endif +#ifndef O_UPDATE +#define O_UPDATE 5 /* ns_o_update */ +#endif + +/* response codes */ +#ifndef SERVFAIL +#define SERVFAIL ns_r_servfail +#endif +#ifndef NOTIMP +#define NOTIMP ns_r_notimpl +#endif +#ifndef REFUSED +#define REFUSED ns_r_refused +#endif +#if defined(_WIN32) && !defined(HAVE_ARPA_NAMESER_COMPAT_H) && defined(NOERROR) +#undef NOERROR /* it seems this is already 
defined in winerror.h */ +#endif +#ifndef NOERROR +#define NOERROR ns_r_noerror +#endif +#ifndef FORMERR +#define FORMERR ns_r_formerr +#endif +#ifndef NXDOMAIN +#define NXDOMAIN ns_r_nxdomain +#endif +/* Non-standard response codes, use numeric values */ +#ifndef YXDOMAIN +#define YXDOMAIN 6 /* ns_r_yxdomain */ +#endif +#ifndef YXRRSET +#define YXRRSET 7 /* ns_r_yxrrset */ +#endif +#ifndef NXRRSET +#define NXRRSET 8 /* ns_r_nxrrset */ +#endif +#ifndef NOTAUTH +#define NOTAUTH 9 /* ns_r_notauth */ +#endif +#ifndef NOTZONE +#define NOTZONE 10 /* ns_r_notzone */ +#endif +#ifndef TSIG_BADSIG +#define TSIG_BADSIG 16 /* ns_r_badsig */ +#endif +#ifndef TSIG_BADKEY +#define TSIG_BADKEY 17 /* ns_r_badkey */ +#endif +#ifndef TSIG_BADTIME +#define TSIG_BADTIME 18 /* ns_r_badtime */ +#endif + +/* classes */ +#ifndef C_IN +#define C_IN 1 /* ns_c_in */ +#endif +#ifndef C_CHAOS +#define C_CHAOS 3 /* ns_c_chaos */ +#endif +#ifndef C_HS +#define C_HS 4 /* ns_c_hs */ +#endif +#ifndef C_NONE +#define C_NONE 254 /* ns_c_none */ +#endif +#ifndef C_ANY +#define C_ANY 255 /* ns_c_any */ +#endif + +/* types */ +#ifndef T_A +#define T_A 1 /* ns_t_a */ +#endif +#ifndef T_NS +#define T_NS 2 /* ns_t_ns */ +#endif +#ifndef T_MD +#define T_MD 3 /* ns_t_md */ +#endif +#ifndef T_MF +#define T_MF 4 /* ns_t_mf */ +#endif +#ifndef T_CNAME +#define T_CNAME 5 /* ns_t_cname */ +#endif +#ifndef T_SOA +#define T_SOA 6 /* ns_t_soa */ +#endif +#ifndef T_MB +#define T_MB 7 /* ns_t_mb */ +#endif +#ifndef T_MG +#define T_MG 8 /* ns_t_mg */ +#endif +#ifndef T_MR +#define T_MR 9 /* ns_t_mr */ +#endif +#ifndef T_NULL +#define T_NULL 10 /* ns_t_null */ +#endif +#ifndef T_WKS +#define T_WKS 11 /* ns_t_wks */ +#endif +#ifndef T_PTR +#define T_PTR 12 /* ns_t_ptr */ +#endif +#ifndef T_HINFO +#define T_HINFO 13 /* ns_t_hinfo */ +#endif +#ifndef T_MINFO +#define T_MINFO 14 /* ns_t_minfo */ +#endif +#ifndef T_MX +#define T_MX 15 /* ns_t_mx */ +#endif +#ifndef T_TXT +#define T_TXT 16 /* ns_t_txt */ +#endif +#ifndef T_RP 
+#define T_RP 17 /* ns_t_rp */ +#endif +#ifndef T_AFSDB +#define T_AFSDB 18 /* ns_t_afsdb */ +#endif +#ifndef T_X25 +#define T_X25 19 /* ns_t_x25 */ +#endif +#ifndef T_ISDN +#define T_ISDN 20 /* ns_t_isdn */ +#endif +#ifndef T_RT +#define T_RT 21 /* ns_t_rt */ +#endif +#ifndef T_NSAP +#define T_NSAP 22 /* ns_t_nsap */ +#endif +#ifndef T_NSAP_PTR +#define T_NSAP_PTR 23 /* ns_t_nsap_ptr */ +#endif +#ifndef T_SIG +#define T_SIG 24 /* ns_t_sig */ +#endif +#ifndef T_KEY +#define T_KEY 25 /* ns_t_key */ +#endif +#ifndef T_PX +#define T_PX 26 /* ns_t_px */ +#endif +#ifndef T_GPOS +#define T_GPOS 27 /* ns_t_gpos */ +#endif +#ifndef T_AAAA +#define T_AAAA 28 /* ns_t_aaaa */ +#endif +#ifndef T_LOC +#define T_LOC 29 /* ns_t_loc */ +#endif +#ifndef T_NXT +#define T_NXT 30 /* ns_t_nxt */ +#endif +#ifndef T_EID +#define T_EID 31 /* ns_t_eid */ +#endif +#ifndef T_NIMLOC +#define T_NIMLOC 32 /* ns_t_nimloc */ +#endif +#ifndef T_SRV +#define T_SRV 33 /* ns_t_srv */ +#endif +#ifndef T_ATMA +#define T_ATMA 34 /* ns_t_atma */ +#endif +#ifndef T_NAPTR +#define T_NAPTR 35 /* ns_t_naptr */ +#endif +#ifndef T_KX +#define T_KX 36 /* ns_t_kx */ +#endif +#ifndef T_CERT +#define T_CERT 37 /* ns_t_cert */ +#endif +#ifndef T_A6 +#define T_A6 38 /* ns_t_a6 */ +#endif +#ifndef T_DNAME +#define T_DNAME 39 /* ns_t_dname */ +#endif +#ifndef T_SINK +#define T_SINK 40 /* ns_t_sink */ +#endif +#ifndef T_OPT +#define T_OPT 41 /* ns_t_opt */ +#endif +#ifndef T_APL +#define T_APL 42 /* ns_t_apl */ +#endif +#ifndef T_DS +#define T_DS 43 /* ns_t_ds */ +#endif +#ifndef T_SSHFP +#define T_SSHFP 44 /* ns_t_sshfp */ +#endif +#ifndef T_RRSIG +#define T_RRSIG 46 /* ns_t_rrsig */ +#endif +#ifndef T_NSEC +#define T_NSEC 47 /* ns_t_nsec */ +#endif +#ifndef T_DNSKEY +#define T_DNSKEY 48 /* ns_t_dnskey */ +#endif +#ifndef T_TKEY +#define T_TKEY 249 /* ns_t_tkey */ +#endif +#ifndef T_TSIG +#define T_TSIG 250 /* ns_t_tsig */ +#endif +#ifndef T_IXFR +#define T_IXFR 251 /* ns_t_ixfr */ +#endif +#ifndef T_AXFR +#define 
T_AXFR 252 /* ns_t_axfr */ +#endif +#ifndef T_MAILB +#define T_MAILB 253 /* ns_t_mailb */ +#endif +#ifndef T_MAILA +#define T_MAILA 254 /* ns_t_maila */ +#endif +#ifndef T_ANY +#define T_ANY 255 /* ns_t_any */ +#endif +#ifndef T_URI +#define T_URI 256 /* ns_t_uri */ +#endif +#ifndef T_CAA +#define T_CAA 257 /* ns_t_caa */ +#endif +#ifndef T_MAX +#define T_MAX 65536 /* ns_t_max */ +#endif + +#endif /* ARES_NAMESER_H */ diff --git a/CAPI/cpp/grpc/include/ares_rules.h b/CAPI/cpp/grpc/include/ares_rules.h new file mode 100644 index 00000000..136aa2b5 --- /dev/null +++ b/CAPI/cpp/grpc/include/ares_rules.h @@ -0,0 +1,134 @@ +/* MIT License + * + * Copyright (c) 2009 Daniel Stenberg + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * SPDX-License-Identifier: MIT + */ +#ifndef __CARES_RULES_H +#define __CARES_RULES_H + +/* ================================================================ */ +/* COMPILE TIME SANITY CHECKS */ +/* ================================================================ */ + +/* + * NOTE 1: + * ------- + * + * All checks done in this file are intentionally placed in a public + * header file which is pulled by ares.h when an application is + * being built using an already built c-ares library. Additionally + * this file is also included and used when building the library. + * + * If compilation fails on this file it is certainly sure that the + * problem is elsewhere. It could be a problem in the ares_build.h + * header file, or simply that you are using different compilation + * settings than those used to build the library. + * + * Nothing in this file is intended to be modified or adjusted by the + * c-ares library user nor by the c-ares library builder. + * + * Do not deactivate any check, these are done to make sure that the + * library is properly built and used. + * + * You can find further help on the c-ares development mailing list: + * http://lists.haxx.se/listinfo/c-ares/ + * + * NOTE 2 + * ------ + * + * Some of the following compile time checks are based on the fact + * that the dimension of a constant array can not be a negative one. + * In this way if the compile time verification fails, the compilation + * will fail issuing an error. 
The error description wording is compiler + * dependent but it will be quite similar to one of the following: + * + * "negative subscript or subscript is too large" + * "array must have at least one element" + * "-1 is an illegal array size" + * "size of array is negative" + * + * If you are building an application which tries to use an already + * built c-ares library and you are getting this kind of errors on + * this file, it is a clear indication that there is a mismatch between + * how the library was built and how you are trying to use it for your + * application. Your already compiled or binary library provider is the + * only one who can give you the details you need to properly use it. + */ + +/* + * Verify that some macros are actually defined. + */ + +#ifndef CARES_TYPEOF_ARES_SOCKLEN_T +#error "CARES_TYPEOF_ARES_SOCKLEN_T definition is missing!" +Error Compilation_aborted_CARES_TYPEOF_ARES_SOCKLEN_T_is_missing +#endif + +/* + * Macros private to this header file. + */ + +#define CareschkszEQ(t, s) sizeof(t) == s ? 1 : -1 + +#define CareschkszGE(t1, t2) sizeof(t1) >= sizeof(t2) ? 1 : -1 + + /* + * Verify that the size previously defined and expected for + * ares_socklen_t is actually the same as the one reported + * by sizeof() at compile time. + */ + + typedef char __cares_rule_02__[CareschkszEQ( + ares_socklen_t, sizeof(CARES_TYPEOF_ARES_SOCKLEN_T) + )]; + +/* + * Verify at compile time that the size of ares_socklen_t as reported + * by sizeof() is greater or equal than the one reported for int for + * the current compilation. + */ + +typedef char __cares_rule_03__[CareschkszGE(ares_socklen_t, int)]; + +/* ================================================================ */ +/* EXTERNALLY AND INTERNALLY VISIBLE DEFINITIONS */ +/* ================================================================ */ + +/* + * Get rid of macros private to this header file. 
+ */ + +#undef CareschkszEQ +#undef CareschkszGE + +/* + * Get rid of macros not intended to exist beyond this point. + */ + +#undef CARES_PULL_WS2TCPIP_H +#undef CARES_PULL_SYS_TYPES_H +#undef CARES_PULL_SYS_SOCKET_H + +#undef CARES_TYPEOF_ARES_SOCKLEN_T + +#endif /* __CARES_RULES_H */ diff --git a/CAPI/cpp/grpc/include/ares_version.h b/CAPI/cpp/grpc/include/ares_version.h new file mode 100644 index 00000000..6ebf8b22 --- /dev/null +++ b/CAPI/cpp/grpc/include/ares_version.h @@ -0,0 +1,49 @@ +/* MIT License + * + * Copyright (c) Daniel Stenberg + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * SPDX-License-Identifier: MIT + */ + +#ifndef ARES__VERSION_H +#define ARES__VERSION_H + +/* This is the global package copyright */ +#define ARES_COPYRIGHT "2004 - 2024 Daniel Stenberg, ." 
+ +#define ARES_VERSION_MAJOR 1 +#define ARES_VERSION_MINOR 27 +#define ARES_VERSION_PATCH 0 +#define ARES_VERSION \ + ((ARES_VERSION_MAJOR << 16) | (ARES_VERSION_MINOR << 8) | \ + (ARES_VERSION_PATCH)) +#define ARES_VERSION_STR "1.27.0" + +#if (ARES_VERSION >= 0x010700) +#define CARES_HAVE_ARES_LIBRARY_INIT 1 +#define CARES_HAVE_ARES_LIBRARY_CLEANUP 1 +#else +#undef CARES_HAVE_ARES_LIBRARY_INIT +#undef CARES_HAVE_ARES_LIBRARY_CLEANUP +#endif + +#endif diff --git a/CAPI/cpp/grpc/include/google/protobuf/any.h b/CAPI/cpp/grpc/include/google/protobuf/any.h new file mode 100644 index 00000000..751055e0 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/any.h @@ -0,0 +1,158 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_ANY_H__ +#define GOOGLE_PROTOBUF_ANY_H__ + +#include + +#include +#include +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + + class FieldDescriptor; + class Message; + + namespace internal + { + + extern const char kAnyFullTypeName[]; // "google.protobuf.Any". + extern const char kTypeGoogleApisComPrefix[]; // "type.googleapis.com/". + extern const char kTypeGoogleProdComPrefix[]; // "type.googleprod.com/". + + std::string GetTypeUrl(StringPiece message_name, StringPiece type_url_prefix); + + // Helper class used to implement google::protobuf::Any. + class PROTOBUF_EXPORT AnyMetadata + { + typedef ArenaStringPtr UrlType; + typedef ArenaStringPtr ValueType; + + public: + // AnyMetadata does not take ownership of "type_url" and "value". + constexpr AnyMetadata(UrlType* type_url, ValueType* value) : + type_url_(type_url), + value_(value) + { + } + + // Packs a message using the default type URL prefix: "type.googleapis.com". + // The resulted type URL will be "type.googleapis.com/". + // Returns false if serializing the message failed. + template + bool PackFrom(Arena* arena, const T& message) + { + return InternalPackFrom(arena, message, kTypeGoogleApisComPrefix, T::FullMessageName()); + } + + bool PackFrom(Arena* arena, const Message& message); + + // Packs a message using the given type URL prefix. 
The type URL will be + // constructed by concatenating the message type's full name to the prefix + // with an optional "/" separator if the prefix doesn't already end with "/". + // For example, both PackFrom(message, "type.googleapis.com") and + // PackFrom(message, "type.googleapis.com/") yield the same result type + // URL: "type.googleapis.com/". + // Returns false if serializing the message failed. + template + bool PackFrom(Arena* arena, const T& message, StringPiece type_url_prefix) + { + return InternalPackFrom(arena, message, type_url_prefix, T::FullMessageName()); + } + + bool PackFrom(Arena* arena, const Message& message, StringPiece type_url_prefix); + + // Unpacks the payload into the given message. Returns false if the message's + // type doesn't match the type specified in the type URL (i.e., the full + // name after the last "/" of the type URL doesn't match the message's actual + // full name) or parsing the payload has failed. + template + bool UnpackTo(T* message) const + { + return InternalUnpackTo(T::FullMessageName(), message); + } + + bool UnpackTo(Message* message) const; + + // Checks whether the type specified in the type URL matches the given type. + // A type is considered matching if its full name matches the full name after + // the last "/" in the type URL. + template + bool Is() const + { + return InternalIs(T::FullMessageName()); + } + + private: + bool InternalPackFrom(Arena* arena, const MessageLite& message, StringPiece type_url_prefix, StringPiece type_name); + bool InternalUnpackTo(StringPiece type_name, MessageLite* message) const; + bool InternalIs(StringPiece type_name) const; + + UrlType* type_url_; + ValueType* value_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(AnyMetadata); + }; + + // Get the proto type name from Any::type_url value. For example, passing + // "type.googleapis.com/rpc.QueryOrigin" will return "rpc.QueryOrigin" in + // *full_type_name. 
Returns false if the type_url does not have a "/" + // in the type url separating the full type name. + // + // NOTE: this function is available publicly as a static method on the + // generated message type: google::protobuf::Any::ParseAnyTypeUrl() + bool ParseAnyTypeUrl(StringPiece type_url, std::string* full_type_name); + + // Get the proto type name and prefix from Any::type_url value. For example, + // passing "type.googleapis.com/rpc.QueryOrigin" will return + // "type.googleapis.com/" in *url_prefix and "rpc.QueryOrigin" in + // *full_type_name. Returns false if the type_url does not have a "/" in the + // type url separating the full type name. + bool ParseAnyTypeUrl(StringPiece type_url, std::string* url_prefix, std::string* full_type_name); + + // See if message is of type google.protobuf.Any, if so, return the descriptors + // for "type_url" and "value" fields. + bool GetAnyFieldDescriptors(const Message& message, const FieldDescriptor** type_url_field, const FieldDescriptor** value_field); + + } // namespace internal + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_ANY_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/any.pb.h b/CAPI/cpp/grpc/include/google/protobuf/any.pb.h new file mode 100644 index 00000000..760f22be --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/any.pb.h @@ -0,0 +1,452 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/protobuf/any.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fany_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fany_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. 
+#endif +#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fany_2eproto PROTOBUF_EXPORT +PROTOBUF_NAMESPACE_OPEN +namespace internal +{ + class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. +struct PROTOBUF_EXPORT TableStruct_google_2fprotobuf_2fany_2eproto +{ + static const uint32_t offsets[]; +}; +PROTOBUF_EXPORT extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google_2fprotobuf_2fany_2eproto; +PROTOBUF_NAMESPACE_OPEN +class Any; +struct AnyDefaultTypeInternal; +PROTOBUF_EXPORT extern AnyDefaultTypeInternal _Any_default_instance_; +PROTOBUF_NAMESPACE_CLOSE +PROTOBUF_NAMESPACE_OPEN +template<> +PROTOBUF_EXPORT ::PROTOBUF_NAMESPACE_ID::Any* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::Any>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +PROTOBUF_NAMESPACE_OPEN + +// =================================================================== + +class PROTOBUF_EXPORT Any final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Any) */ +{ +public: + inline Any() : + Any(nullptr) + { + } + ~Any() override; + explicit PROTOBUF_CONSTEXPR Any(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Any(const Any& from); + Any(Any&& from) noexcept + : + Any() + { + *this = ::std::move(from); + } + + inline Any& operator=(const Any& from) + { + CopyFrom(from); + return *this; + } + inline Any& operator=(Any&& from) noexcept + { + if (this == &from) + return *this; + if 
(GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return default_instance().GetMetadata().reflection; + } + static const Any& default_instance() + { + return *internal_default_instance(); + } + static inline const Any* internal_default_instance() + { + return reinterpret_cast( + &_Any_default_instance_ + ); + } + static constexpr int kIndexInFileMessages = + 0; + + // implements Any ----------------------------------------------- + + bool PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message) + { + GOOGLE_DCHECK_NE(&message, this); + return _impl_._any_metadata_.PackFrom(GetArena(), message); + } + bool PackFrom(const ::PROTOBUF_NAMESPACE_ID::Message& message, ::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) + { + GOOGLE_DCHECK_NE(&message, this); + return _impl_._any_metadata_.PackFrom(GetArena(), message, type_url_prefix); + } + bool UnpackTo(::PROTOBUF_NAMESPACE_ID::Message* message) const + { + return _impl_._any_metadata_.UnpackTo(message); + } + static bool GetAnyFieldDescriptors( + const ::PROTOBUF_NAMESPACE_ID::Message& message, + const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** type_url_field, + const ::PROTOBUF_NAMESPACE_ID::FieldDescriptor** value_field + ); + template::value>::type> + bool PackFrom(const T& message) + { + return _impl_._any_metadata_.PackFrom(GetArena(), message); + } + template::value>::type> + bool PackFrom(const T& message, ::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url_prefix) + { + return 
_impl_._any_metadata_.PackFrom(GetArena(), message, type_url_prefix); + } + template::value>::type> + bool UnpackTo(T* message) const + { + return _impl_._any_metadata_.UnpackTo(message); + } + template + bool Is() const + { + return _impl_._any_metadata_.Is(); + } + static bool ParseAnyTypeUrl(::PROTOBUF_NAMESPACE_ID::ConstStringParam type_url, std::string* full_type_name); + friend void swap(Any& a, Any& b) + { + a.Swap(&b); + } + inline void Swap(Any* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Any* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + Any* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Any& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Any& from) + { + Any::MergeImpl(*this, from); + } + +private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + +public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return 
_impl_._cached_size_.Get(); + } + +private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Any* other); + +private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.Any"; + } + +protected: + explicit Any(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + +public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int + { + kTypeUrlFieldNumber = 1, + kValueFieldNumber = 2, + }; + // string type_url = 1; + void clear_type_url(); + const std::string& type_url() const; + template + void set_type_url(ArgT0&& arg0, ArgT... args); + std::string* mutable_type_url(); + PROTOBUF_NODISCARD std::string* release_type_url(); + void set_allocated_type_url(std::string* type_url); + +private: + const std::string& _internal_type_url() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_type_url(const std::string& value); + std::string* _internal_mutable_type_url(); + +public: + // bytes value = 2; + void clear_value(); + const std::string& value() const; + template + void set_value(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_value(); + PROTOBUF_NODISCARD std::string* release_value(); + void set_allocated_value(std::string* value); + +private: + const std::string& _internal_value() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_value(const std::string& value); + std::string* _internal_mutable_value(); + +public: + // @@protoc_insertion_point(class_scope:google.protobuf.Any) + +private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr type_url_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr value_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata _any_metadata_; + }; + union + { + Impl_ _impl_; + }; + friend struct ::TableStruct_google_2fprotobuf_2fany_2eproto; +}; +// =================================================================== + +// =================================================================== + +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// Any + +// string type_url = 1; +inline void Any::clear_type_url() +{ + _impl_.type_url_.ClearToEmpty(); +} +inline const std::string& Any::type_url() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Any.type_url) + return _internal_type_url(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Any::set_type_url(ArgT0&& arg0, ArgT... 
args) +{ + _impl_.type_url_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Any.type_url) +} +inline std::string* Any::mutable_type_url() +{ + std::string* _s = _internal_mutable_type_url(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Any.type_url) + return _s; +} +inline const std::string& Any::_internal_type_url() const +{ + return _impl_.type_url_.Get(); +} +inline void Any::_internal_set_type_url(const std::string& value) +{ + _impl_.type_url_.Set(value, GetArenaForAllocation()); +} +inline std::string* Any::_internal_mutable_type_url() +{ + return _impl_.type_url_.Mutable(GetArenaForAllocation()); +} +inline std::string* Any::release_type_url() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Any.type_url) + return _impl_.type_url_.Release(); +} +inline void Any::set_allocated_type_url(std::string* type_url) +{ + if (type_url != nullptr) + { + } + else + { + } + _impl_.type_url_.SetAllocated(type_url, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.type_url_.IsDefault()) + { + _impl_.type_url_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Any.type_url) +} + +// bytes value = 2; +inline void Any::clear_value() +{ + _impl_.value_.ClearToEmpty(); +} +inline const std::string& Any::value() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Any.value) + return _internal_value(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Any::set_value(ArgT0&& arg0, ArgT... 
args) +{ + _impl_.value_.SetBytes(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Any.value) +} +inline std::string* Any::mutable_value() +{ + std::string* _s = _internal_mutable_value(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Any.value) + return _s; +} +inline const std::string& Any::_internal_value() const +{ + return _impl_.value_.Get(); +} +inline void Any::_internal_set_value(const std::string& value) +{ + _impl_.value_.Set(value, GetArenaForAllocation()); +} +inline std::string* Any::_internal_mutable_value() +{ + return _impl_.value_.Mutable(GetArenaForAllocation()); +} +inline std::string* Any::release_value() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Any.value) + return _impl_.value_.Release(); +} +inline void Any::set_allocated_value(std::string* value) +{ + if (value != nullptr) + { + } + else + { + } + _impl_.value_.SetAllocated(value, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.value_.IsDefault()) + { + _impl_.value_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Any.value) +} + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif // __GNUC__ + +// @@protoc_insertion_point(namespace_scope) + +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fany_2eproto diff --git a/CAPI/cpp/grpc/include/google/protobuf/any.proto b/CAPI/cpp/grpc/include/google/protobuf/any.proto new file mode 100644 index 00000000..e2c2042f --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/any.proto @@ -0,0 +1,158 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "google.golang.org/protobuf/types/known/anypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. 
+ // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/CAPI/cpp/grpc/include/google/protobuf/api.pb.h b/CAPI/cpp/grpc/include/google/protobuf/api.pb.h new file mode 100644 index 00000000..5d1b21f3 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/api.pb.h @@ -0,0 +1,1724 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/protobuf/api.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fapi_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fapi_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fapi_2eproto PROTOBUF_EXPORT +PROTOBUF_NAMESPACE_OPEN +namespace internal +{ + class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct PROTOBUF_EXPORT TableStruct_google_2fprotobuf_2fapi_2eproto +{ + static const uint32_t offsets[]; +}; +PROTOBUF_EXPORT extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google_2fprotobuf_2fapi_2eproto; +PROTOBUF_NAMESPACE_OPEN +class Api; +struct ApiDefaultTypeInternal; +PROTOBUF_EXPORT extern ApiDefaultTypeInternal _Api_default_instance_; +class Method; +struct MethodDefaultTypeInternal; +PROTOBUF_EXPORT extern MethodDefaultTypeInternal _Method_default_instance_; +class Mixin; +struct MixinDefaultTypeInternal; +PROTOBUF_EXPORT extern MixinDefaultTypeInternal _Mixin_default_instance_; +PROTOBUF_NAMESPACE_CLOSE +PROTOBUF_NAMESPACE_OPEN +template<> +PROTOBUF_EXPORT ::PROTOBUF_NAMESPACE_ID::Api* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::Api>(Arena*); +template<> +PROTOBUF_EXPORT ::PROTOBUF_NAMESPACE_ID::Method* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::Method>(Arena*); +template<> +PROTOBUF_EXPORT ::PROTOBUF_NAMESPACE_ID::Mixin* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::Mixin>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +PROTOBUF_NAMESPACE_OPEN + +// =================================================================== + +class PROTOBUF_EXPORT Api final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Api) */ +{ +public: + inline Api() : + Api(nullptr) + { + } + ~Api() override; + explicit PROTOBUF_CONSTEXPR Api(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Api(const Api& from); + Api(Api&& from) noexcept + : + Api() + { + *this = ::std::move(from); + } + + inline Api& operator=(const Api& from) + { + CopyFrom(from); + return *this; + } + inline Api& operator=(Api&& from) noexcept + { + if (this == &from) + return *this; + if (GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + 
return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return default_instance().GetMetadata().reflection; + } + static const Api& default_instance() + { + return *internal_default_instance(); + } + static inline const Api* internal_default_instance() + { + return reinterpret_cast( + &_Api_default_instance_ + ); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(Api& a, Api& b) + { + a.Swap(&b); + } + inline void Swap(Api* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Api* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + Api* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Api& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Api& from) + { + Api::MergeImpl(*this, from); + } + +private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + +public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const 
char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return _impl_._cached_size_.Get(); + } + +private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Api* other); + +private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.Api"; + } + +protected: + explicit Api(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + +public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int + { + kMethodsFieldNumber = 2, + kOptionsFieldNumber = 3, + kMixinsFieldNumber = 6, + kNameFieldNumber = 1, + kVersionFieldNumber = 4, + kSourceContextFieldNumber = 5, + kSyntaxFieldNumber = 7, + }; + // repeated .google.protobuf.Method methods = 2; + int methods_size() const; + +private: + int _internal_methods_size() const; + +public: + void clear_methods(); + ::PROTOBUF_NAMESPACE_ID::Method* mutable_methods(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Method>* + mutable_methods(); + +private: + const ::PROTOBUF_NAMESPACE_ID::Method& _internal_methods(int index) const; + ::PROTOBUF_NAMESPACE_ID::Method* _internal_add_methods(); + +public: + const ::PROTOBUF_NAMESPACE_ID::Method& methods(int index) const; + ::PROTOBUF_NAMESPACE_ID::Method* add_methods(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Method>& + methods() 
const; + + // repeated .google.protobuf.Option options = 3; + int options_size() const; + +private: + int _internal_options_size() const; + +public: + void clear_options(); + ::PROTOBUF_NAMESPACE_ID::Option* mutable_options(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>* + mutable_options(); + +private: + const ::PROTOBUF_NAMESPACE_ID::Option& _internal_options(int index) const; + ::PROTOBUF_NAMESPACE_ID::Option* _internal_add_options(); + +public: + const ::PROTOBUF_NAMESPACE_ID::Option& options(int index) const; + ::PROTOBUF_NAMESPACE_ID::Option* add_options(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>& + options() const; + + // repeated .google.protobuf.Mixin mixins = 6; + int mixins_size() const; + +private: + int _internal_mixins_size() const; + +public: + void clear_mixins(); + ::PROTOBUF_NAMESPACE_ID::Mixin* mutable_mixins(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Mixin>* + mutable_mixins(); + +private: + const ::PROTOBUF_NAMESPACE_ID::Mixin& _internal_mixins(int index) const; + ::PROTOBUF_NAMESPACE_ID::Mixin* _internal_add_mixins(); + +public: + const ::PROTOBUF_NAMESPACE_ID::Mixin& mixins(int index) const; + ::PROTOBUF_NAMESPACE_ID::Mixin* add_mixins(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Mixin>& + mixins() const; + + // string name = 1; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + +private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + +public: + // string version = 4; + void clear_version(); + const std::string& version() const; + template + void set_version(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_version(); + PROTOBUF_NODISCARD std::string* release_version(); + void set_allocated_version(std::string* version); + +private: + const std::string& _internal_version() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_version(const std::string& value); + std::string* _internal_mutable_version(); + +public: + // .google.protobuf.SourceContext source_context = 5; + bool has_source_context() const; + +private: + bool _internal_has_source_context() const; + +public: + void clear_source_context(); + const ::PROTOBUF_NAMESPACE_ID::SourceContext& source_context() const; + PROTOBUF_NODISCARD ::PROTOBUF_NAMESPACE_ID::SourceContext* release_source_context(); + ::PROTOBUF_NAMESPACE_ID::SourceContext* mutable_source_context(); + void set_allocated_source_context(::PROTOBUF_NAMESPACE_ID::SourceContext* source_context); + +private: + const ::PROTOBUF_NAMESPACE_ID::SourceContext& _internal_source_context() const; + ::PROTOBUF_NAMESPACE_ID::SourceContext* _internal_mutable_source_context(); + +public: + void unsafe_arena_set_allocated_source_context( + ::PROTOBUF_NAMESPACE_ID::SourceContext* source_context + ); + ::PROTOBUF_NAMESPACE_ID::SourceContext* unsafe_arena_release_source_context(); + + // .google.protobuf.Syntax syntax = 7; + void clear_syntax(); + ::PROTOBUF_NAMESPACE_ID::Syntax syntax() const; + void set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value); + +private: + ::PROTOBUF_NAMESPACE_ID::Syntax _internal_syntax() const; + void _internal_set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value); + +public: + // @@protoc_insertion_point(class_scope:google.protobuf.Api) + +private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Method> methods_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option> 
options_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Mixin> mixins_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr version_; + ::PROTOBUF_NAMESPACE_ID::SourceContext* source_context_; + int syntax_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union + { + Impl_ _impl_; + }; + friend struct ::TableStruct_google_2fprotobuf_2fapi_2eproto; +}; +// ------------------------------------------------------------------- + +class PROTOBUF_EXPORT Method final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Method) */ +{ +public: + inline Method() : + Method(nullptr) + { + } + ~Method() override; + explicit PROTOBUF_CONSTEXPR Method(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Method(const Method& from); + Method(Method&& from) noexcept + : + Method() + { + *this = ::std::move(from); + } + + inline Method& operator=(const Method& from) + { + CopyFrom(from); + return *this; + } + inline Method& operator=(Method&& from) noexcept + { + if (this == &from) + return *this; + if (GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return default_instance().GetMetadata().reflection; + } + static const Method& default_instance() + { + return *internal_default_instance(); + } + static inline const Method* internal_default_instance() + { + return reinterpret_cast( + &_Method_default_instance_ + ); + } + static 
constexpr int kIndexInFileMessages = + 1; + + friend void swap(Method& a, Method& b) + { + a.Swap(&b); + } + inline void Swap(Method* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Method* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + Method* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Method& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Method& from) + { + Method::MergeImpl(*this, from); + } + +private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + +public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return _impl_._cached_size_.Get(); + } + +private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Method* other); + +private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static 
::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.Method"; + } + +protected: + explicit Method(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + +public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int + { + kOptionsFieldNumber = 6, + kNameFieldNumber = 1, + kRequestTypeUrlFieldNumber = 2, + kResponseTypeUrlFieldNumber = 4, + kRequestStreamingFieldNumber = 3, + kResponseStreamingFieldNumber = 5, + kSyntaxFieldNumber = 7, + }; + // repeated .google.protobuf.Option options = 6; + int options_size() const; + +private: + int _internal_options_size() const; + +public: + void clear_options(); + ::PROTOBUF_NAMESPACE_ID::Option* mutable_options(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>* + mutable_options(); + +private: + const ::PROTOBUF_NAMESPACE_ID::Option& _internal_options(int index) const; + ::PROTOBUF_NAMESPACE_ID::Option* _internal_add_options(); + +public: + const ::PROTOBUF_NAMESPACE_ID::Option& options(int index) const; + ::PROTOBUF_NAMESPACE_ID::Option* add_options(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>& + options() const; + + // string name = 1; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + +private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + +public: + // string request_type_url = 2; + void clear_request_type_url(); + const std::string& request_type_url() const; + template + void set_request_type_url(ArgT0&& arg0, ArgT... args); + std::string* mutable_request_type_url(); + PROTOBUF_NODISCARD std::string* release_request_type_url(); + void set_allocated_request_type_url(std::string* request_type_url); + +private: + const std::string& _internal_request_type_url() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_request_type_url(const std::string& value); + std::string* _internal_mutable_request_type_url(); + +public: + // string response_type_url = 4; + void clear_response_type_url(); + const std::string& response_type_url() const; + template + void set_response_type_url(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_response_type_url(); + PROTOBUF_NODISCARD std::string* release_response_type_url(); + void set_allocated_response_type_url(std::string* response_type_url); + +private: + const std::string& _internal_response_type_url() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_response_type_url(const std::string& value); + std::string* _internal_mutable_response_type_url(); + +public: + // bool request_streaming = 3; + void clear_request_streaming(); + bool request_streaming() const; + void set_request_streaming(bool value); + +private: + bool _internal_request_streaming() const; + void _internal_set_request_streaming(bool value); + +public: + // bool response_streaming = 5; + void clear_response_streaming(); + bool response_streaming() const; + void set_response_streaming(bool value); + +private: + bool _internal_response_streaming() const; + void _internal_set_response_streaming(bool value); + +public: + // .google.protobuf.Syntax syntax = 7; + void clear_syntax(); + ::PROTOBUF_NAMESPACE_ID::Syntax syntax() const; + void set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value); + +private: + ::PROTOBUF_NAMESPACE_ID::Syntax _internal_syntax() const; + void _internal_set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value); + +public: + // @@protoc_insertion_point(class_scope:google.protobuf.Method) + +private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option> options_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr request_type_url_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr response_type_url_; + bool request_streaming_; + bool response_streaming_; + int syntax_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union + { + Impl_ 
_impl_; + }; + friend struct ::TableStruct_google_2fprotobuf_2fapi_2eproto; +}; +// ------------------------------------------------------------------- + +class PROTOBUF_EXPORT Mixin final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Mixin) */ +{ +public: + inline Mixin() : + Mixin(nullptr) + { + } + ~Mixin() override; + explicit PROTOBUF_CONSTEXPR Mixin(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Mixin(const Mixin& from); + Mixin(Mixin&& from) noexcept + : + Mixin() + { + *this = ::std::move(from); + } + + inline Mixin& operator=(const Mixin& from) + { + CopyFrom(from); + return *this; + } + inline Mixin& operator=(Mixin&& from) noexcept + { + if (this == &from) + return *this; + if (GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return default_instance().GetMetadata().reflection; + } + static const Mixin& default_instance() + { + return *internal_default_instance(); + } + static inline const Mixin* internal_default_instance() + { + return reinterpret_cast( + &_Mixin_default_instance_ + ); + } + static constexpr int kIndexInFileMessages = + 2; + + friend void swap(Mixin& a, Mixin& b) + { + a.Swap(&b); + } + inline void Swap(Mixin* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // 
!PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Mixin* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + Mixin* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Mixin& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Mixin& from) + { + Mixin::MergeImpl(*this, from); + } + +private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + +public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return _impl_._cached_size_.Get(); + } + +private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Mixin* other); + +private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.Mixin"; + } + +protected: + explicit Mixin(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + +public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types 
---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int + { + kNameFieldNumber = 1, + kRootFieldNumber = 2, + }; + // string name = 1; + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + +private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + +public: + // string root = 2; + void clear_root(); + const std::string& root() const; + template + void set_root(ArgT0&& arg0, ArgT... args); + std::string* mutable_root(); + PROTOBUF_NODISCARD std::string* release_root(); + void set_allocated_root(std::string* root); + +private: + const std::string& _internal_root() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_root(const std::string& value); + std::string* _internal_mutable_root(); + +public: + // @@protoc_insertion_point(class_scope:google.protobuf.Mixin) + +private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr root_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + }; + union + { + Impl_ _impl_; + }; + friend struct ::TableStruct_google_2fprotobuf_2fapi_2eproto; +}; +// =================================================================== + +// =================================================================== + +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ +// Api + +// string name = 1; +inline void Api::clear_name() 
+{ + _impl_.name_.ClearToEmpty(); +} +inline const std::string& Api::name() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Api.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Api::set_name(ArgT0&& arg0, ArgT... args) +{ + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Api.name) +} +inline std::string* Api::mutable_name() +{ + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Api.name) + return _s; +} +inline const std::string& Api::_internal_name() const +{ + return _impl_.name_.Get(); +} +inline void Api::_internal_set_name(const std::string& value) +{ + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* Api::_internal_mutable_name() +{ + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* Api::release_name() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Api.name) + return _impl_.name_.Release(); +} +inline void Api::set_allocated_name(std::string* name) +{ + if (name != nullptr) + { + } + else + { + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) + { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Api.name) +} + +// repeated .google.protobuf.Method methods = 2; +inline int Api::_internal_methods_size() const +{ + return _impl_.methods_.size(); +} +inline int Api::methods_size() const +{ + return _internal_methods_size(); +} +inline void Api::clear_methods() +{ + _impl_.methods_.Clear(); +} +inline ::PROTOBUF_NAMESPACE_ID::Method* Api::mutable_methods(int index) +{ + // @@protoc_insertion_point(field_mutable:google.protobuf.Api.methods) + return _impl_.methods_.Mutable(index); +} +inline 
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Method>* + Api::mutable_methods() +{ + // @@protoc_insertion_point(field_mutable_list:google.protobuf.Api.methods) + return &_impl_.methods_; +} +inline const ::PROTOBUF_NAMESPACE_ID::Method& Api::_internal_methods(int index) const +{ + return _impl_.methods_.Get(index); +} +inline const ::PROTOBUF_NAMESPACE_ID::Method& Api::methods(int index) const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Api.methods) + return _internal_methods(index); +} +inline ::PROTOBUF_NAMESPACE_ID::Method* Api::_internal_add_methods() +{ + return _impl_.methods_.Add(); +} +inline ::PROTOBUF_NAMESPACE_ID::Method* Api::add_methods() +{ + ::PROTOBUF_NAMESPACE_ID::Method* _add = _internal_add_methods(); + // @@protoc_insertion_point(field_add:google.protobuf.Api.methods) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Method>& + Api::methods() const +{ + // @@protoc_insertion_point(field_list:google.protobuf.Api.methods) + return _impl_.methods_; +} + +// repeated .google.protobuf.Option options = 3; +inline int Api::_internal_options_size() const +{ + return _impl_.options_.size(); +} +inline int Api::options_size() const +{ + return _internal_options_size(); +} +inline ::PROTOBUF_NAMESPACE_ID::Option* Api::mutable_options(int index) +{ + // @@protoc_insertion_point(field_mutable:google.protobuf.Api.options) + return _impl_.options_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>* + Api::mutable_options() +{ + // @@protoc_insertion_point(field_mutable_list:google.protobuf.Api.options) + return &_impl_.options_; +} +inline const ::PROTOBUF_NAMESPACE_ID::Option& Api::_internal_options(int index) const +{ + return _impl_.options_.Get(index); +} +inline const ::PROTOBUF_NAMESPACE_ID::Option& Api::options(int index) const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Api.options) + return 
_internal_options(index); +} +inline ::PROTOBUF_NAMESPACE_ID::Option* Api::_internal_add_options() +{ + return _impl_.options_.Add(); +} +inline ::PROTOBUF_NAMESPACE_ID::Option* Api::add_options() +{ + ::PROTOBUF_NAMESPACE_ID::Option* _add = _internal_add_options(); + // @@protoc_insertion_point(field_add:google.protobuf.Api.options) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>& + Api::options() const +{ + // @@protoc_insertion_point(field_list:google.protobuf.Api.options) + return _impl_.options_; +} + +// string version = 4; +inline void Api::clear_version() +{ + _impl_.version_.ClearToEmpty(); +} +inline const std::string& Api::version() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Api.version) + return _internal_version(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Api::set_version(ArgT0&& arg0, ArgT... args) +{ + _impl_.version_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Api.version) +} +inline std::string* Api::mutable_version() +{ + std::string* _s = _internal_mutable_version(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Api.version) + return _s; +} +inline const std::string& Api::_internal_version() const +{ + return _impl_.version_.Get(); +} +inline void Api::_internal_set_version(const std::string& value) +{ + _impl_.version_.Set(value, GetArenaForAllocation()); +} +inline std::string* Api::_internal_mutable_version() +{ + return _impl_.version_.Mutable(GetArenaForAllocation()); +} +inline std::string* Api::release_version() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Api.version) + return _impl_.version_.Release(); +} +inline void Api::set_allocated_version(std::string* version) +{ + if (version != nullptr) + { + } + else + { + } + _impl_.version_.SetAllocated(version, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if 
(_impl_.version_.IsDefault()) + { + _impl_.version_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Api.version) +} + +// .google.protobuf.SourceContext source_context = 5; +inline bool Api::_internal_has_source_context() const +{ + return this != internal_default_instance() && _impl_.source_context_ != nullptr; +} +inline bool Api::has_source_context() const +{ + return _internal_has_source_context(); +} +inline const ::PROTOBUF_NAMESPACE_ID::SourceContext& Api::_internal_source_context() const +{ + const ::PROTOBUF_NAMESPACE_ID::SourceContext* p = _impl_.source_context_; + return p != nullptr ? *p : reinterpret_cast(::PROTOBUF_NAMESPACE_ID::_SourceContext_default_instance_); +} +inline const ::PROTOBUF_NAMESPACE_ID::SourceContext& Api::source_context() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Api.source_context) + return _internal_source_context(); +} +inline void Api::unsafe_arena_set_allocated_source_context( + ::PROTOBUF_NAMESPACE_ID::SourceContext* source_context +) +{ + if (GetArenaForAllocation() == nullptr) + { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_context_); + } + _impl_.source_context_ = source_context; + if (source_context) + { + } + else + { + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Api.source_context) +} +inline ::PROTOBUF_NAMESPACE_ID::SourceContext* Api::release_source_context() +{ + ::PROTOBUF_NAMESPACE_ID::SourceContext* temp = _impl_.source_context_; + _impl_.source_context_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) + { + delete old; + } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) + { + temp = 
::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::SourceContext* Api::unsafe_arena_release_source_context() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Api.source_context) + + ::PROTOBUF_NAMESPACE_ID::SourceContext* temp = _impl_.source_context_; + _impl_.source_context_ = nullptr; + return temp; +} +inline ::PROTOBUF_NAMESPACE_ID::SourceContext* Api::_internal_mutable_source_context() +{ + if (_impl_.source_context_ == nullptr) + { + auto* p = CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::SourceContext>(GetArenaForAllocation()); + _impl_.source_context_ = p; + } + return _impl_.source_context_; +} +inline ::PROTOBUF_NAMESPACE_ID::SourceContext* Api::mutable_source_context() +{ + ::PROTOBUF_NAMESPACE_ID::SourceContext* _msg = _internal_mutable_source_context(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Api.source_context) + return _msg; +} +inline void Api::set_allocated_source_context(::PROTOBUF_NAMESPACE_ID::SourceContext* source_context) +{ + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) + { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.source_context_); + } + if (source_context) + { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(source_context) + ); + if (message_arena != submessage_arena) + { + source_context = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, source_context, submessage_arena + ); + } + } + else + { + } + _impl_.source_context_ = source_context; + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Api.source_context) +} + +// repeated .google.protobuf.Mixin mixins = 6; +inline int Api::_internal_mixins_size() const +{ + return _impl_.mixins_.size(); +} +inline int 
Api::mixins_size() const +{ + return _internal_mixins_size(); +} +inline void Api::clear_mixins() +{ + _impl_.mixins_.Clear(); +} +inline ::PROTOBUF_NAMESPACE_ID::Mixin* Api::mutable_mixins(int index) +{ + // @@protoc_insertion_point(field_mutable:google.protobuf.Api.mixins) + return _impl_.mixins_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Mixin>* + Api::mutable_mixins() +{ + // @@protoc_insertion_point(field_mutable_list:google.protobuf.Api.mixins) + return &_impl_.mixins_; +} +inline const ::PROTOBUF_NAMESPACE_ID::Mixin& Api::_internal_mixins(int index) const +{ + return _impl_.mixins_.Get(index); +} +inline const ::PROTOBUF_NAMESPACE_ID::Mixin& Api::mixins(int index) const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Api.mixins) + return _internal_mixins(index); +} +inline ::PROTOBUF_NAMESPACE_ID::Mixin* Api::_internal_add_mixins() +{ + return _impl_.mixins_.Add(); +} +inline ::PROTOBUF_NAMESPACE_ID::Mixin* Api::add_mixins() +{ + ::PROTOBUF_NAMESPACE_ID::Mixin* _add = _internal_add_mixins(); + // @@protoc_insertion_point(field_add:google.protobuf.Api.mixins) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Mixin>& + Api::mixins() const +{ + // @@protoc_insertion_point(field_list:google.protobuf.Api.mixins) + return _impl_.mixins_; +} + +// .google.protobuf.Syntax syntax = 7; +inline void Api::clear_syntax() +{ + _impl_.syntax_ = 0; +} +inline ::PROTOBUF_NAMESPACE_ID::Syntax Api::_internal_syntax() const +{ + return static_cast<::PROTOBUF_NAMESPACE_ID::Syntax>(_impl_.syntax_); +} +inline ::PROTOBUF_NAMESPACE_ID::Syntax Api::syntax() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Api.syntax) + return _internal_syntax(); +} +inline void Api::_internal_set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value) +{ + _impl_.syntax_ = value; +} +inline void Api::set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value) +{ + _internal_set_syntax(value); + 
// @@protoc_insertion_point(field_set:google.protobuf.Api.syntax) +} + +// ------------------------------------------------------------------- + +// Method + +// string name = 1; +inline void Method::clear_name() +{ + _impl_.name_.ClearToEmpty(); +} +inline const std::string& Method::name() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Method.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Method::set_name(ArgT0&& arg0, ArgT... args) +{ + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Method.name) +} +inline std::string* Method::mutable_name() +{ + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Method.name) + return _s; +} +inline const std::string& Method::_internal_name() const +{ + return _impl_.name_.Get(); +} +inline void Method::_internal_set_name(const std::string& value) +{ + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* Method::_internal_mutable_name() +{ + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* Method::release_name() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Method.name) + return _impl_.name_.Release(); +} +inline void Method::set_allocated_name(std::string* name) +{ + if (name != nullptr) + { + } + else + { + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) + { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Method.name) +} + +// string request_type_url = 2; +inline void Method::clear_request_type_url() +{ + _impl_.request_type_url_.ClearToEmpty(); +} +inline const std::string& Method::request_type_url() const +{ + // 
@@protoc_insertion_point(field_get:google.protobuf.Method.request_type_url) + return _internal_request_type_url(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Method::set_request_type_url(ArgT0&& arg0, ArgT... args) +{ + _impl_.request_type_url_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Method.request_type_url) +} +inline std::string* Method::mutable_request_type_url() +{ + std::string* _s = _internal_mutable_request_type_url(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Method.request_type_url) + return _s; +} +inline const std::string& Method::_internal_request_type_url() const +{ + return _impl_.request_type_url_.Get(); +} +inline void Method::_internal_set_request_type_url(const std::string& value) +{ + _impl_.request_type_url_.Set(value, GetArenaForAllocation()); +} +inline std::string* Method::_internal_mutable_request_type_url() +{ + return _impl_.request_type_url_.Mutable(GetArenaForAllocation()); +} +inline std::string* Method::release_request_type_url() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Method.request_type_url) + return _impl_.request_type_url_.Release(); +} +inline void Method::set_allocated_request_type_url(std::string* request_type_url) +{ + if (request_type_url != nullptr) + { + } + else + { + } + _impl_.request_type_url_.SetAllocated(request_type_url, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.request_type_url_.IsDefault()) + { + _impl_.request_type_url_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Method.request_type_url) +} + +// bool request_streaming = 3; +inline void Method::clear_request_streaming() +{ + _impl_.request_streaming_ = false; +} +inline bool Method::_internal_request_streaming() const +{ + return _impl_.request_streaming_; +} +inline bool Method::request_streaming() 
const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Method.request_streaming) + return _internal_request_streaming(); +} +inline void Method::_internal_set_request_streaming(bool value) +{ + _impl_.request_streaming_ = value; +} +inline void Method::set_request_streaming(bool value) +{ + _internal_set_request_streaming(value); + // @@protoc_insertion_point(field_set:google.protobuf.Method.request_streaming) +} + +// string response_type_url = 4; +inline void Method::clear_response_type_url() +{ + _impl_.response_type_url_.ClearToEmpty(); +} +inline const std::string& Method::response_type_url() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Method.response_type_url) + return _internal_response_type_url(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Method::set_response_type_url(ArgT0&& arg0, ArgT... args) +{ + _impl_.response_type_url_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Method.response_type_url) +} +inline std::string* Method::mutable_response_type_url() +{ + std::string* _s = _internal_mutable_response_type_url(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Method.response_type_url) + return _s; +} +inline const std::string& Method::_internal_response_type_url() const +{ + return _impl_.response_type_url_.Get(); +} +inline void Method::_internal_set_response_type_url(const std::string& value) +{ + _impl_.response_type_url_.Set(value, GetArenaForAllocation()); +} +inline std::string* Method::_internal_mutable_response_type_url() +{ + return _impl_.response_type_url_.Mutable(GetArenaForAllocation()); +} +inline std::string* Method::release_response_type_url() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Method.response_type_url) + return _impl_.response_type_url_.Release(); +} +inline void Method::set_allocated_response_type_url(std::string* response_type_url) +{ + if (response_type_url != nullptr) + { + } + else + { + 
} + _impl_.response_type_url_.SetAllocated(response_type_url, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.response_type_url_.IsDefault()) + { + _impl_.response_type_url_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Method.response_type_url) +} + +// bool response_streaming = 5; +inline void Method::clear_response_streaming() +{ + _impl_.response_streaming_ = false; +} +inline bool Method::_internal_response_streaming() const +{ + return _impl_.response_streaming_; +} +inline bool Method::response_streaming() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Method.response_streaming) + return _internal_response_streaming(); +} +inline void Method::_internal_set_response_streaming(bool value) +{ + _impl_.response_streaming_ = value; +} +inline void Method::set_response_streaming(bool value) +{ + _internal_set_response_streaming(value); + // @@protoc_insertion_point(field_set:google.protobuf.Method.response_streaming) +} + +// repeated .google.protobuf.Option options = 6; +inline int Method::_internal_options_size() const +{ + return _impl_.options_.size(); +} +inline int Method::options_size() const +{ + return _internal_options_size(); +} +inline ::PROTOBUF_NAMESPACE_ID::Option* Method::mutable_options(int index) +{ + // @@protoc_insertion_point(field_mutable:google.protobuf.Method.options) + return _impl_.options_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>* + Method::mutable_options() +{ + // @@protoc_insertion_point(field_mutable_list:google.protobuf.Method.options) + return &_impl_.options_; +} +inline const ::PROTOBUF_NAMESPACE_ID::Option& Method::_internal_options(int index) const +{ + return _impl_.options_.Get(index); +} +inline const ::PROTOBUF_NAMESPACE_ID::Option& Method::options(int index) const +{ + // 
@@protoc_insertion_point(field_get:google.protobuf.Method.options) + return _internal_options(index); +} +inline ::PROTOBUF_NAMESPACE_ID::Option* Method::_internal_add_options() +{ + return _impl_.options_.Add(); +} +inline ::PROTOBUF_NAMESPACE_ID::Option* Method::add_options() +{ + ::PROTOBUF_NAMESPACE_ID::Option* _add = _internal_add_options(); + // @@protoc_insertion_point(field_add:google.protobuf.Method.options) + return _add; +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::Option>& + Method::options() const +{ + // @@protoc_insertion_point(field_list:google.protobuf.Method.options) + return _impl_.options_; +} + +// .google.protobuf.Syntax syntax = 7; +inline void Method::clear_syntax() +{ + _impl_.syntax_ = 0; +} +inline ::PROTOBUF_NAMESPACE_ID::Syntax Method::_internal_syntax() const +{ + return static_cast<::PROTOBUF_NAMESPACE_ID::Syntax>(_impl_.syntax_); +} +inline ::PROTOBUF_NAMESPACE_ID::Syntax Method::syntax() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Method.syntax) + return _internal_syntax(); +} +inline void Method::_internal_set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value) +{ + _impl_.syntax_ = value; +} +inline void Method::set_syntax(::PROTOBUF_NAMESPACE_ID::Syntax value) +{ + _internal_set_syntax(value); + // @@protoc_insertion_point(field_set:google.protobuf.Method.syntax) +} + +// ------------------------------------------------------------------- + +// Mixin + +// string name = 1; +inline void Mixin::clear_name() +{ + _impl_.name_.ClearToEmpty(); +} +inline const std::string& Mixin::name() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Mixin.name) + return _internal_name(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Mixin::set_name(ArgT0&& arg0, ArgT... 
args) +{ + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Mixin.name) +} +inline std::string* Mixin::mutable_name() +{ + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Mixin.name) + return _s; +} +inline const std::string& Mixin::_internal_name() const +{ + return _impl_.name_.Get(); +} +inline void Mixin::_internal_set_name(const std::string& value) +{ + _impl_.name_.Set(value, GetArenaForAllocation()); +} +inline std::string* Mixin::_internal_mutable_name() +{ + return _impl_.name_.Mutable(GetArenaForAllocation()); +} +inline std::string* Mixin::release_name() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Mixin.name) + return _impl_.name_.Release(); +} +inline void Mixin::set_allocated_name(std::string* name) +{ + if (name != nullptr) + { + } + else + { + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) + { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Mixin.name) +} + +// string root = 2; +inline void Mixin::clear_root() +{ + _impl_.root_.ClearToEmpty(); +} +inline const std::string& Mixin::root() const +{ + // @@protoc_insertion_point(field_get:google.protobuf.Mixin.root) + return _internal_root(); +} +template +inline PROTOBUF_ALWAYS_INLINE void Mixin::set_root(ArgT0&& arg0, ArgT... 
args) +{ + _impl_.root_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.Mixin.root) +} +inline std::string* Mixin::mutable_root() +{ + std::string* _s = _internal_mutable_root(); + // @@protoc_insertion_point(field_mutable:google.protobuf.Mixin.root) + return _s; +} +inline const std::string& Mixin::_internal_root() const +{ + return _impl_.root_.Get(); +} +inline void Mixin::_internal_set_root(const std::string& value) +{ + _impl_.root_.Set(value, GetArenaForAllocation()); +} +inline std::string* Mixin::_internal_mutable_root() +{ + return _impl_.root_.Mutable(GetArenaForAllocation()); +} +inline std::string* Mixin::release_root() +{ + // @@protoc_insertion_point(field_release:google.protobuf.Mixin.root) + return _impl_.root_.Release(); +} +inline void Mixin::set_allocated_root(std::string* root) +{ + if (root != nullptr) + { + } + else + { + } + _impl_.root_.SetAllocated(root, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.root_.IsDefault()) + { + _impl_.root_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.Mixin.root) +} + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif // __GNUC__ +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// @@protoc_insertion_point(namespace_scope) + +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fapi_2eproto diff --git a/CAPI/cpp/grpc/include/google/protobuf/api.proto b/CAPI/cpp/grpc/include/google/protobuf/api.proto new file mode 100644 index 00000000..3d598fc8 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/api.proto @@ -0,0 +1,208 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 
Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/source_context.proto"; +import "google/protobuf/type.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "ApiProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/apipb"; + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +message Api { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + string name = 1; + + // The methods of this interface, in unspecified order. + repeated Method methods = 2; + + // Any metadata attached to the interface. + repeated Option options = 3; + + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. 
Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + string version = 4; + + // Source context for the protocol buffer service represented by this + // message. + SourceContext source_context = 5; + + // Included interfaces. See [Mixin][]. + repeated Mixin mixins = 6; + + // The source syntax of the service. + Syntax syntax = 7; +} + +// Method represents a method of an API interface. +message Method { + // The simple name of this method. + string name = 1; + + // A URL of the input message type. + string request_type_url = 2; + + // If true, the request is streamed. + bool request_streaming = 3; + + // The URL of the output message type. + string response_type_url = 4; + + // If true, the response is streamed. + bool response_streaming = 5; + + // Any metadata attached to the method. + repeated Option options = 6; + + // The source syntax of this method. + Syntax syntax = 7; +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. 
Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inheriting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +message Mixin { + // The fully qualified name of the interface which is included. 
+ string name = 1; + + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + string root = 2; +} diff --git a/CAPI/cpp/grpc/include/google/protobuf/arena.h b/CAPI/cpp/grpc/include/google/protobuf/arena.h new file mode 100644 index 00000000..a60d22a4 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/arena.h @@ -0,0 +1,962 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file defines an Arena allocator for better allocation performance. + +#ifndef GOOGLE_PROTOBUF_ARENA_H__ +#define GOOGLE_PROTOBUF_ARENA_H__ + +#include +#include +#include +#if defined(_MSC_VER) && !defined(_LIBCPP_STD_VER) && !_HAS_EXCEPTIONS +// Work around bugs in MSVC header when _HAS_EXCEPTIONS=0. +#include +#include +namespace std +{ + using type_info = ::type_info; +} +#else +#include +#endif + +#include +#include +#include + +// Must be included last. 
+#include + +#ifdef SWIG +#error "You cannot SWIG proto headers" +#endif + +namespace google +{ + namespace protobuf + { + + struct ArenaOptions; // defined below + class Arena; // defined below + class Message; // defined in message.h + class MessageLite; + template + class Map; + + namespace arena_metrics + { + + void EnableArenaMetrics(ArenaOptions* options); + + } // namespace arena_metrics + + namespace TestUtil + { + class ReflectionTester; // defined in test_util.h + } // namespace TestUtil + + namespace internal + { + + struct ArenaTestPeer; // defined in arena_test_util.h + class InternalMetadata; // defined in metadata_lite.h + class LazyField; // defined in lazy_field.h + class EpsCopyInputStream; // defined in parse_context.h + class RepeatedPtrFieldBase; // defined in repeated_ptr_field.h + + template + class GenericTypeHandler; // defined in repeated_field.h + + inline PROTOBUF_ALWAYS_INLINE void* AlignTo(void* ptr, size_t align) + { + return reinterpret_cast( + (reinterpret_cast(ptr) + align - 1) & (~align + 1) + ); + } + + // Templated cleanup methods. + template + void arena_destruct_object(void* object) + { + reinterpret_cast(object)->~T(); + } + + template + struct ObjectDestructor + { + constexpr static void (*destructor)(void*) = &arena_destruct_object; + }; + + template + struct ObjectDestructor + { + constexpr static void (*destructor)(void*) = nullptr; + }; + + template + void arena_delete_object(void* object) + { + delete reinterpret_cast(object); + } + } // namespace internal + + // ArenaOptions provides optional additional parameters to arena construction + // that control its block-allocation behavior. + struct ArenaOptions + { + // This defines the size of the first block requested from the system malloc. + // Subsequent block sizes will increase in a geometric series up to a maximum. 
+ size_t start_block_size; + + // This defines the maximum block size requested from system malloc (unless an + // individual arena allocation request occurs with a size larger than this + // maximum). Requested block sizes increase up to this value, then remain + // here. + size_t max_block_size; + + // An initial block of memory for the arena to use, or NULL for none. If + // provided, the block must live at least as long as the arena itself. The + // creator of the Arena retains ownership of the block after the Arena is + // destroyed. + char* initial_block; + + // The size of the initial block, if provided. + size_t initial_block_size; + + // A function pointer to an alloc method that returns memory blocks of size + // requested. By default, it contains a ptr to the malloc function. + // + // NOTE: block_alloc and dealloc functions are expected to behave like + // malloc and free, including Asan poisoning. + void* (*block_alloc)(size_t); + // A function pointer to a dealloc method that takes ownership of the blocks + // from the arena. By default, it contains a ptr to a wrapper function that + // calls free. + void (*block_dealloc)(void*, size_t); + + ArenaOptions() : + start_block_size(internal::AllocationPolicy::kDefaultStartBlockSize), + max_block_size(internal::AllocationPolicy::kDefaultMaxBlockSize), + initial_block(NULL), + initial_block_size(0), + block_alloc(nullptr), + block_dealloc(nullptr), + make_metrics_collector(nullptr) + { + } + + private: + // If make_metrics_collector is not nullptr, it will be called at Arena init + // time. It may return a pointer to a collector instance that will be notified + // of interesting events related to the arena. + internal::ArenaMetricsCollector* (*make_metrics_collector)(); + + internal::ArenaMetricsCollector* MetricsCollector() const + { + return make_metrics_collector ? 
(*make_metrics_collector)() : nullptr; + } + + internal::AllocationPolicy AllocationPolicy() const + { + internal::AllocationPolicy res; + res.start_block_size = start_block_size; + res.max_block_size = max_block_size; + res.block_alloc = block_alloc; + res.block_dealloc = block_dealloc; + res.metrics_collector = MetricsCollector(); + return res; + } + + friend void arena_metrics::EnableArenaMetrics(ArenaOptions*); + + friend class Arena; + friend class ArenaOptionsTestFriend; + }; + +// Support for non-RTTI environments. (The metrics hooks API uses type +// information.) +#if PROTOBUF_RTTI +#define RTTI_TYPE_ID(type) (&typeid(type)) +#else +#define RTTI_TYPE_ID(type) (NULL) +#endif + + // Arena allocator. Arena allocation replaces ordinary (heap-based) allocation + // with new/delete, and improves performance by aggregating allocations into + // larger blocks and freeing allocations all at once. Protocol messages are + // allocated on an arena by using Arena::CreateMessage(Arena*), below, and + // are automatically freed when the arena is destroyed. + // + // This is a thread-safe implementation: multiple threads may allocate from the + // arena concurrently. Destruction is not thread-safe and the destructing + // thread must synchronize with users of the arena first. + // + // An arena provides two allocation interfaces: CreateMessage, which works + // for arena-enabled proto2 message types as well as other types that satisfy + // the appropriate protocol (described below), and Create, which works for + // any arbitrary type T. CreateMessage is better when the type T supports it, + // because this interface (i) passes the arena pointer to the created object so + // that its sub-objects and internal allocations can use the arena too, and (ii) + // elides the object's destructor call when possible. Create does not place + // any special requirements on the type T, and will invoke the object's + // destructor when the arena is destroyed. 
+ // + // The arena message allocation protocol, required by + // CreateMessage(Arena* arena, Args&&... args), is as follows: + // + // - The type T must have (at least) two constructors: a constructor callable + // with `args` (without `arena`), called when a T is allocated on the heap; + // and a constructor callable with `Arena* arena, Args&&... args`, called when + // a T is allocated on an arena. If the second constructor is called with a + // NULL arena pointer, it must be equivalent to invoking the first + // (`args`-only) constructor. + // + // - The type T must have a particular type trait: a nested type + // |InternalArenaConstructable_|. This is usually a typedef to |void|. If no + // such type trait exists, then the instantiation CreateMessage will fail + // to compile. + // + // - The type T *may* have the type trait |DestructorSkippable_|. If this type + // trait is present in the type, then its destructor will not be called if and + // only if it was passed a non-NULL arena pointer. If this type trait is not + // present on the type, then its destructor is always called when the + // containing arena is destroyed. + // + // This protocol is implemented by all arena-enabled proto2 message classes as + // well as protobuf container types like RepeatedPtrField and Map. The protocol + // is internal to protobuf and is not guaranteed to be stable. Non-proto types + // should not rely on this protocol. + class PROTOBUF_EXPORT PROTOBUF_ALIGNAS(8) Arena final + { + public: + // Default constructor with sensible default options, tuned for average + // use-cases. + inline Arena() : + impl_() + { + } + + // Construct an arena with default options, except for the supplied + // initial block. It is more efficient to use this constructor + // instead of passing ArenaOptions if the only configuration needed + // by the caller is supplying an initial block. 
+ inline Arena(char* initial_block, size_t initial_block_size) : + impl_(initial_block, initial_block_size) + { + } + + // Arena constructor taking custom options. See ArenaOptions above for + // descriptions of the options available. + explicit Arena(const ArenaOptions& options) : + impl_(options.initial_block, options.initial_block_size, options.AllocationPolicy()) + { + } + + // Block overhead. Use this as a guide for how much to over-allocate the + // initial block if you want an allocation of size N to fit inside it. + // + // WARNING: if you allocate multiple objects, it is difficult to guarantee + // that a series of allocations will fit in the initial block, especially if + // Arena changes its alignment guarantees in the future! + static const size_t kBlockOverhead = + internal::ThreadSafeArena::kBlockHeaderSize + + internal::ThreadSafeArena::kSerialArenaSize; + + inline ~Arena() + { + } + + // TODO(protobuf-team): Fix callers to use constructor and delete this method. + void Init(const ArenaOptions&) + { + } + + // API to create proto2 message objects on the arena. If the arena passed in + // is NULL, then a heap allocated object is returned. Type T must be a message + // defined in a .proto file with cc_enable_arenas set to true, otherwise a + // compilation error will occur. + // + // RepeatedField and RepeatedPtrField may also be instantiated directly on an + // arena with this method. + // + // This function also accepts any type T that satisfies the arena message + // allocation protocol, documented above. + template + PROTOBUF_ALWAYS_INLINE static T* CreateMessage(Arena* arena, Args&&... args) + { + static_assert( + InternalHelper::is_arena_constructable::value, + "CreateMessage can only construct types that are ArenaConstructable" + ); + // We must delegate to CreateMaybeMessage() and NOT CreateMessageInternal() + // because protobuf generated classes specialize CreateMaybeMessage() and we + // need to use that specialization for code size reasons. 
+ return Arena::CreateMaybeMessage(arena, static_cast(args)...); + } + + // API to create any objects on the arena. Note that only the object will + // be created on the arena; the underlying ptrs (in case of a proto2 message) + // will be still heap allocated. Proto messages should usually be allocated + // with CreateMessage() instead. + // + // Note that even if T satisfies the arena message construction protocol + // (InternalArenaConstructable_ trait and optional DestructorSkippable_ + // trait), as described above, this function does not follow the protocol; + // instead, it treats T as a black-box type, just as if it did not have these + // traits. Specifically, T's constructor arguments will always be only those + // passed to Create() -- no additional arena pointer is implicitly added. + // Furthermore, the destructor will always be called at arena destruction time + // (unless the destructor is trivial). Hence, from T's point of view, it is as + // if the object were allocated on the heap (except that the underlying memory + // is obtained from the arena). + template + PROTOBUF_NDEBUG_INLINE static T* Create(Arena* arena, Args&&... args) + { + return CreateInternal(arena, std::is_convertible(), static_cast(args)...); + } + + // Allocates memory with the specific size and alignment. + void* AllocateAligned(size_t size, size_t align = 8) + { + if (align <= 8) + { + return AllocateAlignedNoHook(internal::AlignUpTo8(size)); + } + else + { + // We are wasting space by over allocating align - 8 bytes. Compared + // to a dedicated function that takes current alignment in consideration. + // Such a scheme would only waste (align - 8)/2 bytes on average, but + // requires a dedicated function in the outline arena allocation + // functions. Possibly re-evaluate tradeoffs later. + return internal::AlignTo(AllocateAlignedNoHook(size + align - 8), align); + } + } + + // Create an array of object type T on the arena *without* invoking the + // constructor of T. 
If `arena` is null, then the return value should be freed + // with `delete[] x;` (or `::operator delete[](x);`). + // To ensure safe uses, this function checks at compile time + // (when compiled as C++11) that T is trivially default-constructible and + // trivially destructible. + template + PROTOBUF_NDEBUG_INLINE static T* CreateArray(Arena* arena, size_t num_elements) + { + static_assert(std::is_trivial::value, "CreateArray requires a trivially constructible type"); + static_assert(std::is_trivially_destructible::value, "CreateArray requires a trivially destructible type"); + GOOGLE_CHECK_LE(num_elements, std::numeric_limits::max() / sizeof(T)) + << "Requested size is too large to fit into size_t."; + if (arena == NULL) + { + return static_cast(::operator new[](num_elements * sizeof(T))); + } + else + { + return arena->CreateInternalRawArray(num_elements); + } + } + + // The following are routines are for monitoring. They will approximate the + // total sum allocated and used memory, but the exact value is an + // implementation deal. For instance allocated space depends on growth + // policies. Do not use these in unit tests. + // Returns the total space allocated by the arena, which is the sum of the + // sizes of the underlying blocks. + uint64_t SpaceAllocated() const + { + return impl_.SpaceAllocated(); + } + // Returns the total space used by the arena. Similar to SpaceAllocated but + // does not include free space and block overhead. The total space returned + // may not include space used by other threads executing concurrently with + // the call to this method. + uint64_t SpaceUsed() const + { + return impl_.SpaceUsed(); + } + + // Frees all storage allocated by this arena after calling destructors + // registered with OwnDestructor() and freeing objects registered with Own(). + // Any objects allocated on this arena are unusable after this call. 
It also + // returns the total space used by the arena which is the sums of the sizes + // of the allocated blocks. This method is not thread-safe. + uint64_t Reset() + { + return impl_.Reset(); + } + + // Adds |object| to a list of heap-allocated objects to be freed with |delete| + // when the arena is destroyed or reset. + template + PROTOBUF_ALWAYS_INLINE void Own(T* object) + { + OwnInternal(object, std::is_convertible()); + } + + // Adds |object| to a list of objects whose destructors will be manually + // called when the arena is destroyed or reset. This differs from Own() in + // that it does not free the underlying memory with |delete|; hence, it is + // normally only used for objects that are placement-newed into + // arena-allocated memory. + template + PROTOBUF_ALWAYS_INLINE void OwnDestructor(T* object) + { + if (object != NULL) + { + impl_.AddCleanup(object, &internal::arena_destruct_object); + } + } + + // Adds a custom member function on an object to the list of destructors that + // will be manually called when the arena is destroyed or reset. This differs + // from OwnDestructor() in that any member function may be specified, not only + // the class destructor. + PROTOBUF_ALWAYS_INLINE void OwnCustomDestructor(void* object, void (*destruct)(void*)) + { + impl_.AddCleanup(object, destruct); + } + + // Retrieves the arena associated with |value| if |value| is an arena-capable + // message, or NULL otherwise. If possible, the call resolves at compile time. + // Note that we can often devirtualize calls to `value->GetArena()` so usually + // calling this method is unnecessary. + template + PROTOBUF_ALWAYS_INLINE static Arena* GetArena(const T* value) + { + return GetArenaInternal(value); + } + + template + class InternalHelper + { + private: + // Provides access to protected GetOwningArena to generated messages. 
+ static Arena* GetOwningArena(const T* p) + { + return p->GetOwningArena(); + } + + static void InternalSwap(T* a, T* b) + { + a->InternalSwap(b); + } + + static Arena* GetArenaForAllocationInternal( + const T* p, std::true_type /*is_derived_from*/ + ) + { + return p->GetArenaForAllocation(); + } + + static Arena* GetArenaForAllocationInternal( + const T* p, std::false_type /*is_derived_from*/ + ) + { + return GetArenaForAllocationForNonMessage( + p, typename is_arena_constructable::type() + ); + } + + static Arena* GetArenaForAllocationForNonMessage( + const T* p, std::true_type /*is_arena_constructible*/ + ) + { + return p->GetArena(); + } + + static Arena* GetArenaForAllocationForNonMessage( + const T* p, std::false_type /*is_arena_constructible*/ + ) + { + return GetArenaForAllocationForNonMessageNonArenaConstructible( + p, typename has_get_arena::type() + ); + } + + static Arena* GetArenaForAllocationForNonMessageNonArenaConstructible( + const T* p, std::true_type /*has_get_arena*/ + ) + { + return p->GetArena(); + } + + static Arena* GetArenaForAllocationForNonMessageNonArenaConstructible( + const T* /* p */, std::false_type /*has_get_arena*/ + ) + { + return nullptr; + } + + template + static char DestructorSkippable(const typename U::DestructorSkippable_*); + template + static double DestructorSkippable(...); + + typedef std::integral_constant< + bool, + sizeof(DestructorSkippable(static_cast(0))) == + sizeof(char) || + std::is_trivially_destructible::value> + is_destructor_skippable; + + template + static char ArenaConstructable( + const typename U::InternalArenaConstructable_* + ); + template + static double ArenaConstructable(...); + + typedef std::integral_constant(static_cast(0))) == sizeof(char)> + is_arena_constructable; + + template().GetArena())>::value, int>::type = 0> + static char HasGetArena(decltype(&U::GetArena)); + template + static double HasGetArena(...); + + typedef std::integral_constant(nullptr)) == sizeof(char)> + has_get_arena; + + 
template + static T* Construct(void* ptr, Args&&... args) + { + return new (ptr) T(static_cast(args)...); + } + + static inline PROTOBUF_ALWAYS_INLINE T* New() + { + return new T(nullptr); + } + + static Arena* GetArena(const T* p) + { + return p->GetArena(); + } + + friend class Arena; + friend class TestUtil::ReflectionTester; + }; + + // Provides access to protected GetOwningArena to generated messages. For + // internal use only. + template + static Arena* InternalGetOwningArena(const T* p) + { + return InternalHelper::GetOwningArena(p); + } + + // Provides access to protected GetArenaForAllocation to generated messages. + // For internal use only. + template + static Arena* InternalGetArenaForAllocation(const T* p) + { + return InternalHelper::GetArenaForAllocationInternal( + p, std::is_convertible() + ); + } + + // Creates message-owned arena. For internal use only. + static Arena* InternalCreateMessageOwnedArena() + { + return new Arena(internal::MessageOwned{}); + } + + // Checks whether this arena is message-owned. For internal use only. + bool InternalIsMessageOwnedArena() + { + return IsMessageOwned(); + } + + // Helper typetraits that indicates support for arenas in a type T at compile + // time. This is public only to allow construction of higher-level templated + // utilities. + // + // is_arena_constructable::value is true if the message type T has arena + // support enabled, and false otherwise. + // + // is_destructor_skippable::value is true if the message type T has told + // the arena that it is safe to skip the destructor, and false otherwise. + // + // This is inside Arena because only Arena has the friend relationships + // necessary to see the underlying generated code traits. 
+ template + struct is_arena_constructable : InternalHelper::is_arena_constructable + { + }; + template + struct is_destructor_skippable : InternalHelper::is_destructor_skippable + { + }; + + private: + internal::ThreadSafeArena impl_; + + template + struct has_get_arena : InternalHelper::has_get_arena + { + }; + + // Constructor solely used by message-owned arena. + inline Arena(internal::MessageOwned) : + impl_(internal::MessageOwned{}) + { + } + + // Checks whether this arena is message-owned. + PROTOBUF_ALWAYS_INLINE bool IsMessageOwned() const + { + return impl_.IsMessageOwned(); + } + + void ReturnArrayMemory(void* p, size_t size) + { + impl_.ReturnArrayMemory(p, size); + } + + template + PROTOBUF_NDEBUG_INLINE static T* CreateMessageInternal(Arena* arena, Args&&... args) + { + static_assert( + InternalHelper::is_arena_constructable::value, + "CreateMessage can only construct types that are ArenaConstructable" + ); + if (arena == NULL) + { + return new T(nullptr, static_cast(args)...); + } + else + { + return arena->DoCreateMessage(static_cast(args)...); + } + } + + // This specialization for no arguments is necessary, because its behavior is + // slightly different. When the arena pointer is nullptr, it calls T() + // instead of T(nullptr). + template + PROTOBUF_NDEBUG_INLINE static T* CreateMessageInternal(Arena* arena) + { + static_assert( + InternalHelper::is_arena_constructable::value, + "CreateMessage can only construct types that are ArenaConstructable" + ); + if (arena == NULL) + { + // Generated arena constructor T(Arena*) is protected. Call via + // InternalHelper. + return InternalHelper::New(); + } + else + { + return arena->DoCreateMessage(); + } + } + + // Allocate and also optionally call collector with the allocated type info + // when allocation recording is enabled. + PROTOBUF_NDEBUG_INLINE void* AllocateInternal(size_t size, size_t align, void (*destructor)(void*), const std::type_info* type) + { + // Monitor allocation if needed. 
+ if (destructor == nullptr) + { + return AllocateAlignedWithHook(size, align, type); + } + else + { + if (align <= 8) + { + auto res = AllocateAlignedWithCleanup(internal::AlignUpTo8(size), type); + res.second->elem = res.first; + res.second->cleanup = destructor; + return res.first; + } + else + { + auto res = AllocateAlignedWithCleanup(size + align - 8, type); + auto ptr = internal::AlignTo(res.first, align); + res.second->elem = ptr; + res.second->cleanup = destructor; + return ptr; + } + } + } + + // CreateMessage requires that T supports arenas, but this private method + // works whether or not T supports arenas. These are not exposed to user code + // as it can cause confusing API usages, and end up having double free in + // user code. These are used only internally from LazyField and Repeated + // fields, since they are designed to work in all mode combinations. + template + PROTOBUF_ALWAYS_INLINE static Msg* DoCreateMaybeMessage(Arena* arena, std::true_type, Args&&... args) + { + return CreateMessageInternal(arena, std::forward(args)...); + } + + template + PROTOBUF_ALWAYS_INLINE static T* DoCreateMaybeMessage(Arena* arena, std::false_type, Args&&... args) + { + return Create(arena, std::forward(args)...); + } + + template + PROTOBUF_ALWAYS_INLINE static T* CreateMaybeMessage(Arena* arena, Args&&... args) + { + return DoCreateMaybeMessage(arena, is_arena_constructable(), std::forward(args)...); + } + + // Just allocate the required size for the given type assuming the + // type has a trivial constructor. + template + PROTOBUF_NDEBUG_INLINE T* CreateInternalRawArray(size_t num_elements) + { + GOOGLE_CHECK_LE(num_elements, std::numeric_limits::max() / sizeof(T)) + << "Requested size is too large to fit into size_t."; + // We count on compiler to realize that if sizeof(T) is a multiple of + // 8 AlignUpTo can be elided. 
+ const size_t n = sizeof(T) * num_elements; + return static_cast( + AllocateAlignedWithHookForArray(n, alignof(T), RTTI_TYPE_ID(T)) + ); + } + + template + PROTOBUF_NDEBUG_INLINE T* DoCreateMessage(Args&&... args) + { + return InternalHelper::Construct( + AllocateInternal(sizeof(T), alignof(T), internal::ObjectDestructor::is_destructor_skippable::value, T>::destructor, RTTI_TYPE_ID(T)), + this, + std::forward(args)... + ); + } + + // CreateInArenaStorage is used to implement map field. Without it, + // Map need to call generated message's protected arena constructor, + // which needs to declare Map as friend of generated message. + template + static void CreateInArenaStorage(T* ptr, Arena* arena, Args&&... args) + { + CreateInArenaStorageInternal(ptr, arena, typename is_arena_constructable::type(), std::forward(args)...); + if (arena != nullptr) + { + RegisterDestructorInternal( + ptr, arena, typename InternalHelper::is_destructor_skippable::type() + ); + } + } + + template + static void CreateInArenaStorageInternal(T* ptr, Arena* arena, std::true_type, Args&&... args) + { + InternalHelper::Construct(ptr, arena, std::forward(args)...); + } + template + static void CreateInArenaStorageInternal(T* ptr, Arena* /* arena */, std::false_type, Args&&... args) + { + new (ptr) T(std::forward(args)...); + } + + template + static void RegisterDestructorInternal(T* /* ptr */, Arena* /* arena */, std::true_type) + { + } + template + static void RegisterDestructorInternal(T* ptr, Arena* arena, std::false_type) + { + arena->OwnDestructor(ptr); + } + + // These implement Create(). The second parameter has type 'true_type' if T is + // a subtype of Message and 'false_type' otherwise. + template + PROTOBUF_ALWAYS_INLINE static T* CreateInternal(Arena* arena, std::true_type, Args&&... 
args) + { + if (arena == nullptr) + { + return new T(std::forward(args)...); + } + else + { + auto destructor = + internal::ObjectDestructor::value, T>::destructor; + T* result = + new (arena->AllocateInternal(sizeof(T), alignof(T), destructor, RTTI_TYPE_ID(T))) + T(std::forward(args)...); + return result; + } + } + template + PROTOBUF_ALWAYS_INLINE static T* CreateInternal(Arena* arena, std::false_type, Args&&... args) + { + if (arena == nullptr) + { + return new T(std::forward(args)...); + } + else + { + auto destructor = + internal::ObjectDestructor::value, T>::destructor; + return new (arena->AllocateInternal(sizeof(T), alignof(T), destructor, RTTI_TYPE_ID(T))) + T(std::forward(args)...); + } + } + + // These implement Own(), which registers an object for deletion (destructor + // call and operator delete()). The second parameter has type 'true_type' if T + // is a subtype of Message and 'false_type' otherwise. Collapsing + // all template instantiations to one for generic Message reduces code size, + // using the virtual destructor instead. + template + PROTOBUF_ALWAYS_INLINE void OwnInternal(T* object, std::true_type) + { + if (object != NULL) + { + impl_.AddCleanup(object, &internal::arena_delete_object); + } + } + template + PROTOBUF_ALWAYS_INLINE void OwnInternal(T* object, std::false_type) + { + if (object != NULL) + { + impl_.AddCleanup(object, &internal::arena_delete_object); + } + } + + // Implementation for GetArena(). Only message objects with + // InternalArenaConstructable_ tags can be associated with an arena, and such + // objects must implement a GetArena() method. 
+ template::value, int>::type = 0> + PROTOBUF_ALWAYS_INLINE static Arena* GetArenaInternal(const T* value) + { + return InternalHelper::GetArena(value); + } + template::value && has_get_arena::value, int>::type = 0> + PROTOBUF_ALWAYS_INLINE static Arena* GetArenaInternal(const T* value) + { + return value->GetArena(); + } + template::value && !has_get_arena::value, int>::type = 0> + PROTOBUF_ALWAYS_INLINE static Arena* GetArenaInternal(const T* value) + { + (void)value; + return nullptr; + } + + template + PROTOBUF_ALWAYS_INLINE static Arena* GetOwningArena(const T* value) + { + return GetOwningArenaInternal( + value, std::is_convertible() + ); + } + + // Implementation for GetOwningArena(). All and only message objects have + // GetOwningArena() method. + template + PROTOBUF_ALWAYS_INLINE static Arena* GetOwningArenaInternal( + const T* value, std::true_type + ) + { + return InternalHelper::GetOwningArena(value); + } + template + PROTOBUF_ALWAYS_INLINE static Arena* GetOwningArenaInternal( + const T* /* value */, std::false_type + ) + { + return nullptr; + } + + void* AllocateAlignedWithHookForArray(size_t n, size_t align, const std::type_info* type) + { + if (align <= 8) + { + return AllocateAlignedWithHookForArray(internal::AlignUpTo8(n), type); + } + else + { + // We are wasting space by over allocating align - 8 bytes. Compared + // to a dedicated function that takes current alignment in consideration. + // Such a scheme would only waste (align - 8)/2 bytes on average, but + // requires a dedicated function in the outline arena allocation + // functions. Possibly re-evaluate tradeoffs later. + return internal::AlignTo( + AllocateAlignedWithHookForArray(n + align - 8, type), align + ); + } + } + + void* AllocateAlignedWithHook(size_t n, size_t align, const std::type_info* type) + { + if (align <= 8) + { + return AllocateAlignedWithHook(internal::AlignUpTo8(n), type); + } + else + { + // We are wasting space by over allocating align - 8 bytes. 
Compared + // to a dedicated function that takes current alignment in consideration. + // Such a scheme would only waste (align - 8)/2 bytes on average, but + // requires a dedicated function in the outline arena allocation + // functions. Possibly re-evaluate tradeoffs later. + return internal::AlignTo(AllocateAlignedWithHook(n + align - 8, type), align); + } + } + + void* AllocateAlignedNoHook(size_t n); + void* AllocateAlignedWithHook(size_t n, const std::type_info* type); + void* AllocateAlignedWithHookForArray(size_t n, const std::type_info* type); + std::pair + AllocateAlignedWithCleanup(size_t n, const std::type_info* type); + + template + friend class internal::GenericTypeHandler; + friend class internal::InternalMetadata; // For user_arena(). + friend class internal::LazyField; // For CreateMaybeMessage. + friend class internal::EpsCopyInputStream; // For parser performance + friend class MessageLite; + template + friend class Map; + template + friend class RepeatedField; // For ReturnArrayMemory + friend class internal::RepeatedPtrFieldBase; // For ReturnArrayMemory + friend struct internal::ArenaTestPeer; + }; + +// Defined above for supporting environments without RTTI. +#undef RTTI_TYPE_ID + + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_ARENA_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/arena_impl.h b/CAPI/cpp/grpc/include/google/protobuf/arena_impl.h new file mode 100644 index 00000000..1b88b77a --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/arena_impl.h @@ -0,0 +1,825 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file defines an Arena allocator for better allocation performance. + +#ifndef GOOGLE_PROTOBUF_ARENA_IMPL_H__ +#define GOOGLE_PROTOBUF_ARENA_IMPL_H__ + +#include +#include +#include + +#include +#include +#include + +#ifdef ADDRESS_SANITIZER +#include +#endif // ADDRESS_SANITIZER + +#include + +// Must be included last. 
+#include + +namespace google +{ + namespace protobuf + { + namespace internal + { + +// To prevent sharing cache lines between threads +#ifdef __cpp_aligned_new + enum + { + kCacheAlignment = 64 + }; +#else + enum + { + kCacheAlignment = alignof(max_align_t) + }; // do the best we can +#endif + + inline constexpr size_t AlignUpTo8(size_t n) + { + // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.) + return (n + 7) & static_cast(-8); + } + + using LifecycleIdAtomic = uint64_t; + + // MetricsCollector collects stats for a particular arena. + class PROTOBUF_EXPORT ArenaMetricsCollector + { + public: + ArenaMetricsCollector(bool record_allocs) : + record_allocs_(record_allocs) + { + } + + // Invoked when the arena is about to be destroyed. This method will + // typically finalize any metric collection and delete the collector. + // space_allocated is the space used by the arena. + virtual void OnDestroy(uint64_t space_allocated) = 0; + + // OnReset() is called when the associated arena is reset. + // space_allocated is the space used by the arena just before the reset. + virtual void OnReset(uint64_t space_allocated) = 0; + + // OnAlloc is called when an allocation happens. + // type_info is promised to be static - its lifetime extends to + // match program's lifetime (It is given by typeid operator). + // Note: typeid(void) will be passed as allocated_type every time we + // intentionally want to avoid monitoring an allocation. (i.e. internal + // allocations for managing the arena) + virtual void OnAlloc(const std::type_info* allocated_type, uint64_t alloc_size) = 0; + + // Does OnAlloc() need to be called? If false, metric collection overhead + // will be reduced since we will not do extra work per allocation. + bool RecordAllocs() + { + return record_allocs_; + } + + protected: + // This class is destructed by the call to OnDestroy(). 
+ ~ArenaMetricsCollector() = default; + const bool record_allocs_; + }; + + struct AllocationPolicy + { + static constexpr size_t kDefaultStartBlockSize = 256; + static constexpr size_t kDefaultMaxBlockSize = 8192; + + size_t start_block_size = kDefaultStartBlockSize; + size_t max_block_size = kDefaultMaxBlockSize; + void* (*block_alloc)(size_t) = nullptr; + void (*block_dealloc)(void*, size_t) = nullptr; + ArenaMetricsCollector* metrics_collector = nullptr; + + bool IsDefault() const + { + return start_block_size == kDefaultMaxBlockSize && + max_block_size == kDefaultMaxBlockSize && block_alloc == nullptr && + block_dealloc == nullptr && metrics_collector == nullptr; + } + }; + + // Tagged pointer to an AllocationPolicy. + class TaggedAllocationPolicyPtr + { + public: + constexpr TaggedAllocationPolicyPtr() : + policy_(0) + { + } + + explicit TaggedAllocationPolicyPtr(AllocationPolicy* policy) : + policy_(reinterpret_cast(policy)) + { + } + + void set_policy(AllocationPolicy* policy) + { + auto bits = policy_ & kTagsMask; + policy_ = reinterpret_cast(policy) | bits; + } + + AllocationPolicy* get() + { + return reinterpret_cast(policy_ & kPtrMask); + } + const AllocationPolicy* get() const + { + return reinterpret_cast(policy_ & kPtrMask); + } + + AllocationPolicy& operator*() + { + return *get(); + } + const AllocationPolicy& operator*() const + { + return *get(); + } + + AllocationPolicy* operator->() + { + return get(); + } + const AllocationPolicy* operator->() const + { + return get(); + } + + bool is_user_owned_initial_block() const + { + return static_cast(get_mask()); + } + void set_is_user_owned_initial_block(bool v) + { + set_mask(v); + } + + bool should_record_allocs() const + { + return static_cast(get_mask()); + } + void set_should_record_allocs(bool v) + { + set_mask(v); + } + + uintptr_t get_raw() const + { + return policy_; + } + + inline void RecordAlloc(const std::type_info* allocated_type, size_t n) const + { + 
get()->metrics_collector->OnAlloc(allocated_type, n); + } + + private: + enum : uintptr_t + { + kUserOwnedInitialBlock = 1, + kRecordAllocs = 2, + }; + + static constexpr uintptr_t kTagsMask = 7; + static constexpr uintptr_t kPtrMask = ~kTagsMask; + + template + uintptr_t get_mask() const + { + return policy_ & kMask; + } + template + void set_mask(bool v) + { + if (v) + { + policy_ |= kMask; + } + else + { + policy_ &= ~kMask; + } + } + uintptr_t policy_; + }; + + enum class AllocationClient + { + kDefault, + kArray + }; + + // A simple arena allocator. Calls to allocate functions must be properly + // serialized by the caller, hence this class cannot be used as a general + // purpose allocator in a multi-threaded program. It serves as a building block + // for ThreadSafeArena, which provides a thread-safe arena allocator. + // + // This class manages + // 1) Arena bump allocation + owning memory blocks. + // 2) Maintaining a cleanup list. + // It delagetes the actual memory allocation back to ThreadSafeArena, which + // contains the information on block growth policy and backing memory allocation + // used. + class PROTOBUF_EXPORT SerialArena + { + public: + struct Memory + { + void* ptr; + size_t size; + }; + + // Node contains the ptr of the object to be cleaned up and the associated + // cleanup function ptr. + struct CleanupNode + { + void* elem; // Pointer to the object to be cleaned up. + void (*cleanup)(void*); // Function pointer to the destructor or deleter. + }; + + void CleanupList(); + uint64_t SpaceAllocated() const + { + return space_allocated_.load(std::memory_order_relaxed); + } + uint64_t SpaceUsed() const; + + bool HasSpace(size_t n) const + { + return n <= static_cast(limit_ - ptr_); + } + + // See comments on `cached_blocks_` member for details. 
+ PROTOBUF_ALWAYS_INLINE void* TryAllocateFromCachedBlock(size_t size) + { + if (PROTOBUF_PREDICT_FALSE(size < 16)) + return nullptr; + // We round up to the next larger block in case the memory doesn't match + // the pattern we are looking for. + const size_t index = Bits::Log2FloorNonZero64(size - 1) - 3; + + if (index >= cached_block_length_) + return nullptr; + auto& cached_head = cached_blocks_[index]; + if (cached_head == nullptr) + return nullptr; + + void* ret = cached_head; +#ifdef ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(ret, size); +#endif // ADDRESS_SANITIZER + cached_head = cached_head->next; + return ret; + } + + // In kArray mode we look through cached blocks. + // We do not do this by default because most non-array allocations will not + // have the right size and will fail to find an appropriate cached block. + // + // TODO(sbenza): Evaluate if we should use cached blocks for message types of + // the right size. We can statically know if the allocation size can benefit + // from it. + template + void* AllocateAligned(size_t n, const AllocationPolicy* policy) + { + GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n); // Must be already aligned. + GOOGLE_DCHECK_GE(limit_, ptr_); + + if (alloc_client == AllocationClient::kArray) + { + if (void* res = TryAllocateFromCachedBlock(n)) + { + return res; + } + } + + if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) + { + return AllocateAlignedFallback(n, policy); + } + return AllocateFromExisting(n); + } + + private: + void* AllocateFromExisting(size_t n) + { + void* ret = ptr_; + ptr_ += n; +#ifdef ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(ret, n); +#endif // ADDRESS_SANITIZER + return ret; + } + + // See comments on `cached_blocks_` member for details. + void ReturnArrayMemory(void* p, size_t size) + { + // We only need to check for 32-bit platforms. + // In 64-bit platforms the minimum allocation size from Repeated*Field will + // be 16 guaranteed. 
+ if (sizeof(void*) < 8) + { + if (PROTOBUF_PREDICT_FALSE(size < 16)) + return; + } + else + { + GOOGLE_DCHECK(size >= 16); + } + + // We round down to the next smaller block in case the memory doesn't match + // the pattern we are looking for. eg, someone might have called Reserve() + // on the repeated field. + const size_t index = Bits::Log2FloorNonZero64(size) - 4; + + if (PROTOBUF_PREDICT_FALSE(index >= cached_block_length_)) + { + // We can't put this object on the freelist so make this object the + // freelist. It is guaranteed it is larger than the one we have, and + // large enough to hold another allocation of `size`. + CachedBlock** new_list = static_cast(p); + size_t new_size = size / sizeof(CachedBlock*); + + std::copy(cached_blocks_, cached_blocks_ + cached_block_length_, new_list); + std::fill(new_list + cached_block_length_, new_list + new_size, nullptr); + cached_blocks_ = new_list; + // Make the size fit in uint8_t. This is the power of two, so we don't + // need anything larger. + cached_block_length_ = + static_cast(std::min(size_t{64}, new_size)); + + return; + } + + auto& cached_head = cached_blocks_[index]; + auto* new_node = static_cast(p); + new_node->next = cached_head; + cached_head = new_node; +#ifdef ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(p, size); +#endif // ADDRESS_SANITIZER + } + + public: + // Allocate space if the current region provides enough space. + bool MaybeAllocateAligned(size_t n, void** out) + { + GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n); // Must be already aligned. + GOOGLE_DCHECK_GE(limit_, ptr_); + if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) + return false; + *out = AllocateFromExisting(n); + return true; + } + + std::pair AllocateAlignedWithCleanup( + size_t n, const AllocationPolicy* policy + ) + { + GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n); // Must be already aligned. 
+ if (PROTOBUF_PREDICT_FALSE(!HasSpace(n + kCleanupSize))) + { + return AllocateAlignedWithCleanupFallback(n, policy); + } + return AllocateFromExistingWithCleanupFallback(n); + } + + private: + std::pair AllocateFromExistingWithCleanupFallback( + size_t n + ) + { + void* ret = ptr_; + ptr_ += n; + limit_ -= kCleanupSize; +#ifdef ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(ret, n); + ASAN_UNPOISON_MEMORY_REGION(limit_, kCleanupSize); +#endif // ADDRESS_SANITIZER + return CreatePair(ret, reinterpret_cast(limit_)); + } + + public: + void AddCleanup(void* elem, void (*cleanup)(void*), const AllocationPolicy* policy) + { + auto res = AllocateAlignedWithCleanup(0, policy); + res.second->elem = elem; + res.second->cleanup = cleanup; + } + + void* owner() const + { + return owner_; + } + SerialArena* next() const + { + return next_; + } + void set_next(SerialArena* next) + { + next_ = next; + } + + private: + friend class ThreadSafeArena; + friend class ArenaBenchmark; + + // Creates a new SerialArena inside mem using the remaining memory as for + // future allocations. + static SerialArena* New(SerialArena::Memory mem, void* owner, ThreadSafeArenaStats* stats); + // Free SerialArena returning the memory passed in to New + template + Memory Free(Deallocator deallocator); + + // Blocks are variable length malloc-ed objects. The following structure + // describes the common header for all blocks. + struct Block + { + Block(Block* next, size_t size) : + next(next), + size(size), + start(nullptr) + { + } + + char* Pointer(size_t n) + { + GOOGLE_DCHECK(n <= size); + return reinterpret_cast(this) + n; + } + + Block* const next; + const size_t size; + CleanupNode* start; + // data follows + }; + + void* owner_; // &ThreadCache of this thread; + Block* head_; // Head of linked list of blocks. + SerialArena* next_; // Next SerialArena in this linked list. + size_t space_used_ = 0; // Necessary for metrics. + std::atomic space_allocated_; + + // Next pointer to allocate from. 
Always 8-byte aligned. Points inside + // head_ (and head_->pos will always be non-canonical). We keep these + // here to reduce indirection. + char* ptr_; + // Limiting address up to which memory can be allocated from the head block. + char* limit_; + // For holding sampling information. The pointer is owned by the + // ThreadSafeArena that holds this serial arena. + ThreadSafeArenaStats* arena_stats_; + + // Repeated*Field and Arena play together to reduce memory consumption by + // reusing blocks. Currently, natural growth of the repeated field types makes + // them allocate blocks of size `8 + 2^N, N>=3`. + // When the repeated field grows returns the previous block and we put it in + // this free list. + // `cached_blocks_[i]` points to the free list for blocks of size `8+2^(i+3)`. + // The array of freelists is grown when needed in `ReturnArrayMemory()`. + struct CachedBlock + { + // Simple linked list. + CachedBlock* next; + }; + uint8_t cached_block_length_ = 0; + CachedBlock** cached_blocks_ = nullptr; + + // Constructor is private as only New() should be used. + inline SerialArena(Block* b, void* owner, ThreadSafeArenaStats* stats); + void* AllocateAlignedFallback(size_t n, const AllocationPolicy* policy); + std::pair AllocateAlignedWithCleanupFallback( + size_t n, const AllocationPolicy* policy + ); + void AllocateNewBlock(size_t n, const AllocationPolicy* policy); + + std::pair CreatePair(void* ptr, CleanupNode* node) + { + return {ptr, node}; + } + + public: + static constexpr size_t kBlockHeaderSize = AlignUpTo8(sizeof(Block)); + static constexpr size_t kCleanupSize = AlignUpTo8(sizeof(CleanupNode)); + }; + + // Tag type used to invoke the constructor of message-owned arena. + // Only message-owned arenas use this constructor for creation. + // Such constructors are internal implementation details of the library. + struct MessageOwned + { + explicit MessageOwned() = default; + }; + + // This class provides the core Arena memory allocation library. 
Different + // implementations only need to implement the public interface below. + // Arena is not a template type as that would only be useful if all protos + // in turn would be templates, which will/cannot happen. However separating + // the memory allocation part from the cruft of the API users expect we can + // use #ifdef the select the best implementation based on hardware / OS. + class PROTOBUF_EXPORT ThreadSafeArena + { + public: + ThreadSafeArena() + { + Init(); + } + + // Constructor solely used by message-owned arena. + ThreadSafeArena(internal::MessageOwned) : + tag_and_id_(kMessageOwnedArena) + { + Init(); + } + + ThreadSafeArena(char* mem, size_t size) + { + InitializeFrom(mem, size); + } + + explicit ThreadSafeArena(void* mem, size_t size, const AllocationPolicy& policy) + { + InitializeWithPolicy(mem, size, policy); + } + + // Destructor deletes all owned heap allocated objects, and destructs objects + // that have non-trivial destructors, except for proto2 message objects whose + // destructors can be skipped. Also, frees all blocks except the initial block + // if it was passed in. + ~ThreadSafeArena(); + + uint64_t Reset(); + + uint64_t SpaceAllocated() const; + uint64_t SpaceUsed() const; + + template + void* AllocateAligned(size_t n, const std::type_info* type) + { + SerialArena* arena; + if (PROTOBUF_PREDICT_TRUE(!alloc_policy_.should_record_allocs() && GetSerialArenaFast(&arena))) + { + return arena->AllocateAligned(n, AllocPolicy()); + } + else + { + return AllocateAlignedFallback(n, type); + } + } + + void ReturnArrayMemory(void* p, size_t size) + { + SerialArena* arena; + if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) + { + arena->ReturnArrayMemory(p, size); + } + } + + // This function allocates n bytes if the common happy case is true and + // returns true. Otherwise does nothing and returns false. 
This strange + // semantics is necessary to allow callers to program functions that only + // have fallback function calls in tail position. This substantially improves + // code for the happy path. + PROTOBUF_NDEBUG_INLINE bool MaybeAllocateAligned(size_t n, void** out) + { + SerialArena* arena; + if (PROTOBUF_PREDICT_TRUE(!alloc_policy_.should_record_allocs() && GetSerialArenaFromThreadCache(&arena))) + { + return arena->MaybeAllocateAligned(n, out); + } + return false; + } + + std::pair AllocateAlignedWithCleanup( + size_t n, const std::type_info* type + ); + + // Add object pointer and cleanup function pointer to the list. + void AddCleanup(void* elem, void (*cleanup)(void*)); + + // Checks whether this arena is message-owned. + PROTOBUF_ALWAYS_INLINE bool IsMessageOwned() const + { + return tag_and_id_ & kMessageOwnedArena; + } + + private: + // Unique for each arena. Changes on Reset(). + uint64_t tag_and_id_ = 0; + // The LSB of tag_and_id_ indicates if the arena is message-owned. + enum : uint64_t + { + kMessageOwnedArena = 1 + }; + + TaggedAllocationPolicyPtr alloc_policy_; // Tagged pointer to AllocPolicy. + + static_assert(std::is_trivially_destructible{}, "SerialArena needs to be trivially destructible."); + // Pointer to a linked list of SerialArena. + std::atomic threads_; + std::atomic hint_; // Fast thread-local block access + + const AllocationPolicy* AllocPolicy() const + { + return alloc_policy_.get(); + } + void InitializeFrom(void* mem, size_t size); + void InitializeWithPolicy(void* mem, size_t size, AllocationPolicy policy); + void* AllocateAlignedFallback(size_t n, const std::type_info* type); + std::pair + AllocateAlignedWithCleanupFallback(size_t n, const std::type_info* type); + + void Init(); + void SetInitialBlock(void* mem, size_t size); + + // Delete or Destruct all objects owned by the arena. 
+ void CleanupList(); + + inline uint64_t LifeCycleId() const + { + return tag_and_id_ & ~kMessageOwnedArena; + } + + inline void CacheSerialArena(SerialArena* serial) + { + thread_cache().last_serial_arena = serial; + thread_cache().last_lifecycle_id_seen = tag_and_id_; + // TODO(haberman): evaluate whether we would gain efficiency by getting rid + // of hint_. It's the only write we do to ThreadSafeArena in the allocation + // path, which will dirty the cache line. + + hint_.store(serial, std::memory_order_release); + } + + PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFast(SerialArena** arena) + { + if (GetSerialArenaFromThreadCache(arena)) + return true; + + // Check whether we own the last accessed SerialArena on this arena. This + // fast path optimizes the case where a single thread uses multiple arenas. + ThreadCache* tc = &thread_cache(); + SerialArena* serial = hint_.load(std::memory_order_acquire); + if (PROTOBUF_PREDICT_TRUE(serial != nullptr && serial->owner() == tc)) + { + *arena = serial; + return true; + } + return false; + } + + PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFromThreadCache( + SerialArena** arena + ) + { + // If this thread already owns a block in this arena then try to use that. + // This fast path optimizes the case where multiple threads allocate from + // the same arena. + ThreadCache* tc = &thread_cache(); + if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == tag_and_id_)) + { + *arena = tc->last_serial_arena; + return true; + } + return false; + } + SerialArena* GetSerialArenaFallback(void* me); + + template + void PerSerialArena(Functor fn) + { + // By omitting an Acquire barrier we ensure that any user code that doesn't + // properly synchronize Reset() or the destructor will throw a TSAN warning. + SerialArena* serial = threads_.load(std::memory_order_relaxed); + + for (; serial; serial = serial->next()) + fn(serial); + } + + // Releases all memory except the first block which it returns. 
The first + // block might be owned by the user and thus need some extra checks before + // deleting. + SerialArena::Memory Free(size_t* space_allocated); + +#ifdef _MSC_VER +#pragma warning(disable : 4324) +#endif + struct alignas(kCacheAlignment) ThreadCache + { +#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL) + // If we are using the ThreadLocalStorage class to store the ThreadCache, + // then the ThreadCache's default constructor has to be responsible for + // initializing it. + ThreadCache() : + next_lifecycle_id(0), + last_lifecycle_id_seen(-1), + last_serial_arena(nullptr) + { + } +#endif + + // Number of per-thread lifecycle IDs to reserve. Must be power of two. + // To reduce contention on a global atomic, each thread reserves a batch of + // IDs. The following number is calculated based on a stress test with + // ~6500 threads all frequently allocating a new arena. + static constexpr size_t kPerThreadIds = 256; + // Next lifecycle ID available to this thread. We need to reserve a new + // batch, if `next_lifecycle_id & (kPerThreadIds - 1) == 0`. + uint64_t next_lifecycle_id; + // The ThreadCache is considered valid as long as this matches the + // lifecycle_id of the arena being used. + uint64_t last_lifecycle_id_seen; + SerialArena* last_serial_arena; + }; + + // Lifecycle_id can be highly contended variable in a situation of lots of + // arena creation. Make sure that other global variables are not sharing the + // cacheline. +#ifdef _MSC_VER +#pragma warning(disable : 4324) +#endif + struct alignas(kCacheAlignment) CacheAlignedLifecycleIdGenerator + { + std::atomic id; + }; + static CacheAlignedLifecycleIdGenerator lifecycle_id_generator_; +#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL) + // iOS does not support __thread keyword so we use a custom thread local + // storage class we implemented. 
+ static ThreadCache& thread_cache(); +#elif defined(PROTOBUF_USE_DLLS) + // Thread local variables cannot be exposed through DLL interface but we can + // wrap them in static functions. + static ThreadCache& thread_cache(); +#else + static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_; + static ThreadCache& thread_cache() + { + return thread_cache_; + } +#endif + + ThreadSafeArenaStatsHandle arena_stats_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadSafeArena); + // All protos have pointers back to the arena hence Arena must have + // pointer stability. + ThreadSafeArena(ThreadSafeArena&&) = delete; + ThreadSafeArena& operator=(ThreadSafeArena&&) = delete; + + public: + // kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8 + // to protect the invariant that pos is always at a multiple of 8. + static constexpr size_t kBlockHeaderSize = SerialArena::kBlockHeaderSize; + static constexpr size_t kSerialArenaSize = + (sizeof(SerialArena) + 7) & static_cast(-8); + static_assert(kBlockHeaderSize % 8 == 0, "kBlockHeaderSize must be a multiple of 8."); + static_assert(kSerialArenaSize % 8 == 0, "kSerialArenaSize must be a multiple of 8."); + }; + + } // namespace internal + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_ARENA_IMPL_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/arenastring.h b/CAPI/cpp/grpc/include/google/protobuf/arenastring.h new file mode 100644 index 00000000..9b95e46f --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/arenastring.h @@ -0,0 +1,552 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef GOOGLE_PROTOBUF_ARENASTRING_H__ +#define GOOGLE_PROTOBUF_ARENASTRING_H__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// must be last: +#include + +#ifdef SWIG +#error "You cannot SWIG proto headers" +#endif + +namespace google +{ + namespace protobuf + { + namespace internal + { + class EpsCopyInputStream; + + class SwapFieldHelper; + + // Declared in message_lite.h + PROTOBUF_EXPORT extern ExplicitlyConstructedArenaString + fixed_address_empty_string; + + // Lazy string instance to support string fields with non-empty default. + // These are initialized on the first call to .get(). + class PROTOBUF_EXPORT LazyString + { + public: + // We explicitly make LazyString an aggregate so that MSVC can do constant + // initialization on it without marking it `constexpr`. + // We do not want to use `constexpr` because it makes it harder to have extern + // storage for it and causes library bloat. + struct InitValue + { + const char* ptr; + size_t size; + }; + // We keep a union of the initialization value and the std::string to save on + // space. We don't need the string array after Init() is done. + union + { + mutable InitValue init_value_; + alignas(std::string) mutable char string_buf_[sizeof(std::string)]; + }; + mutable std::atomic inited_; + + const std::string& get() const + { + // This check generates less code than a call-once invocation. + auto* res = inited_.load(std::memory_order_acquire); + if (PROTOBUF_PREDICT_FALSE(res == nullptr)) + return Init(); + return *res; + } + + private: + // Initialize the string in `string_buf_`, update `inited_` and return it. + // We return it here to avoid having to read it again in the inlined code. + const std::string& Init() const; + }; + + class TaggedStringPtr + { + public: + // Bit flags qualifying string properties. We can use 2 bits as + // ptr_ is guaranteed and enforced to be aligned on 4 byte boundaries. 
+ enum Flags + { + kArenaBit = 0x1, // ptr is arena allocated + kMutableBit = 0x2, // ptr contents are fully mutable + kMask = 0x3 // Bit mask + }; + + // Composed logical types + enum Type + { + // Default strings are immutable and never owned. + kDefault = 0, + + // Allocated strings are mutable and (as the name implies) owned. + // A heap allocated string must be deleted. + kAllocated = kMutableBit, + + // Mutable arena strings are strings where the string instance is owned + // by the arena, but the string contents itself are owned by the string + // instance. Mutable arena string instances need to be destroyed which is + // typically done through a cleanup action added to the arena owning it. + kMutableArena = kArenaBit | kMutableBit, + + // Fixed size arena strings are strings where both the string instance and + // the string contents are fully owned by the arena. Fixed size arena + // strings are a platform and c++ library specific customization. Fixed + // size arena strings are immutable, with the exception of custom internal + // updates to the content that fit inside the existing capacity. + // Fixed size arena strings must never be deleted or destroyed. + kFixedSizeArena = kArenaBit, + }; + + TaggedStringPtr() = default; + explicit constexpr TaggedStringPtr(ExplicitlyConstructedArenaString* ptr) : + ptr_(ptr) + { + } + + // Sets the value to `p`, tagging the value as being a 'default' value. + // See documentation for kDefault for more info. + inline const std::string* SetDefault(const std::string* p) + { + return TagAs(kDefault, const_cast(p)); + } + + // Sets the value to `p`, tagging the value as a heap allocated value. + // Allocated strings are mutable and (as the name implies) owned. + // `p` must not be null + inline std::string* SetAllocated(std::string* p) + { + return TagAs(kAllocated, p); + } + + // Sets the value to `p`, tagging the value as a fixed size arena string. + // See documentation for kFixedSizeArena for more info. 
+ // `p` must not be null + inline std::string* SetFixedSizeArena(std::string* p) + { + return TagAs(kFixedSizeArena, p); + } + + // Sets the value to `p`, tagging the value as a mutable arena string. + // See documentation for kMutableArena for more info. + // `p` must not be null + inline std::string* SetMutableArena(std::string* p) + { + return TagAs(kMutableArena, p); + } + + // Returns true if the contents of the current string are fully mutable. + inline bool IsMutable() const + { + return as_int() & kMutableBit; + } + + // Returns true if the current string is an immutable default value. + inline bool IsDefault() const + { + return (as_int() & kMask) == kDefault; + } + + // If the current string is a heap-allocated mutable value, returns a pointer + // to it. Returns nullptr otherwise. + inline std::string* GetIfAllocated() const + { + auto allocated = as_int() ^ kAllocated; + if (allocated & kMask) + return nullptr; + + auto ptr = reinterpret_cast(allocated); + PROTOBUF_ASSUME(ptr != nullptr); + return ptr; + } + + // Returns true if the current string is an arena allocated value. + // This means it's either a mutable or fixed size arena string. + inline bool IsArena() const + { + return as_int() & kArenaBit; + } + + // Returns true if the current string is a fixed size arena allocated value. + inline bool IsFixedSizeArena() const + { + return (as_int() & kMask) == kFixedSizeArena; + } + + // Returns the contained string pointer. + inline std::string* Get() const + { + return reinterpret_cast(as_int() & ~kMask); + } + + // Returns true if the contained pointer is null, indicating some error. + // The Null value is only used during parsing for temporary values. + // A persisted ArenaStringPtr value is never null. 
+ inline bool IsNull() + { + return ptr_ == nullptr; + } + + private: + static inline void assert_aligned(const void* p) + { + GOOGLE_DCHECK_EQ(reinterpret_cast(p) & kMask, 0UL); + } + + inline std::string* TagAs(Type type, std::string* p) + { + GOOGLE_DCHECK(p != nullptr); + assert_aligned(p); + ptr_ = reinterpret_cast(reinterpret_cast(p) | type); + return p; + } + + uintptr_t as_int() const + { + return reinterpret_cast(ptr_); + } + void* ptr_; + }; + + static_assert(std::is_trivial::value, "TaggedStringPtr must be trivial"); + + // This class encapsulates a pointer to a std::string with or without arena + // owned contents, tagged by the bottom bits of the string pointer. It is a + // high-level wrapper that almost directly corresponds to the interface required + // by string fields in generated code. It replaces the old std::string* pointer + // in such cases. + // + // The string pointer is tagged to be either a default, externally owned value, + // a mutable heap allocated value, or an arena allocated value. The object uses + // a single global instance of an empty string that is used as the initial + // default value. Fields that have empty default values directly use this global + // default. Fields that have non empty default values are supported through + // lazily initialized default values managed by the LazyString class. + // + // Generated code and reflection code both ensure that ptr_ is never null. + // Because ArenaStringPtr is used in oneof unions, its constructor is a NOP and + // the field is always manually initialized via method calls. + // + // See TaggedStringPtr for more information about the types of string values + // being held, and the mutable and ownership invariants for each type. 
+ struct PROTOBUF_EXPORT ArenaStringPtr + { + ArenaStringPtr() = default; + constexpr ArenaStringPtr(ExplicitlyConstructedArenaString* default_value, ConstantInitialized) : + tagged_ptr_(default_value) + { + } + + // Called from generated code / reflection runtime only. Resets value to point + // to a default string pointer, with the semantics that this ArenaStringPtr + // does not own the pointed-to memory. Disregards initial value of ptr_ (so + // this is the *ONLY* safe method to call after construction or when + // reinitializing after becoming the active field in a oneof union). + inline void InitDefault(); + + // Similar to `InitDefault` except that it allows the default value to be + // initialized to an externally owned string. This method is called from + // parsing code. `str` must not be null and outlive this instance. + inline void InitExternal(const std::string* str); + + // Called from generated code / reflection runtime only. Resets the value of + // this instances to the heap allocated value in `str`. `str` must not be + // null. Invokes `arena->Own(str)` to transfer ownership into the arena if + // `arena` is not null, else, `str` will be owned by ArenaStringPtr. This + // function should only be used to initialize a ArenaStringPtr or on an + // instance known to not carry any heap allocated value. + inline void InitAllocated(std::string* str, Arena* arena); + + void Set(ConstStringParam value, Arena* arena); + void Set(std::string&& value, Arena* arena); + void Set(const char* s, Arena* arena); + void Set(const char* s, size_t n, Arena* arena); + + void SetBytes(ConstStringParam value, Arena* arena); + void SetBytes(std::string&& value, Arena* arena); + void SetBytes(const char* s, Arena* arena); + void SetBytes(const void* p, size_t n, Arena* arena); + + template + void Set(std::reference_wrapper const_string_ref, ::google::protobuf::Arena* arena) + { + Set(const_string_ref.get(), arena); + } + + // Returns a mutable std::string reference. 
+ // The version accepting a `LazyString` value is used in the generated code to + // initialize mutable copies for fields with a non-empty default where the + // default value is lazily initialized. + std::string* Mutable(Arena* arena); + std::string* Mutable(const LazyString& default_value, Arena* arena); + + // Gets a mutable pointer with unspecified contents. + // This function is identical to Mutable(), except it is optimized for the + // case where the caller is not interested in the current contents. For + // example, if the current field is not mutable, it will re-initialize the + // value with an empty string rather than a (non-empty) default value. + // Likewise, if the current value is a fixed size arena string with contents, + // it will be initialized into an empty mutable arena string. + std::string* MutableNoCopy(Arena* arena); + + // Basic accessors. + PROTOBUF_NDEBUG_INLINE const std::string& Get() const + { + // Unconditionally mask away the tag. + return *tagged_ptr_.Get(); + } + + // Returns a pointer to the stored contents for this instance. + // This method is for internal debugging and tracking purposes only. + PROTOBUF_NDEBUG_INLINE const std::string* UnsafeGetPointer() const + PROTOBUF_RETURNS_NONNULL + { + return tagged_ptr_.Get(); + } + + // Release returns a std::string* instance that is heap-allocated and is not + // Own()'d by any arena. If the field is not set, this returns nullptr. The + // caller retains ownership. Clears this field back to the default state. + // Used to implement release_() methods on generated classes. + PROTOBUF_NODISCARD std::string* Release(); + + // Takes a std::string that is heap-allocated, and takes ownership. The + // std::string's destructor is registered with the arena. Used to implement + // set_allocated_ in generated classes. + void SetAllocated(std::string* value, Arena* arena); + + // Frees storage (if not on an arena). 
+ void Destroy(); + + // Clears content, but keeps allocated std::string, to avoid the overhead of + // heap operations. After this returns, the content (as seen by the user) will + // always be the empty std::string. Assumes that |default_value| is an empty + // std::string. + void ClearToEmpty(); + + // Clears content, assuming that the current value is not the empty + // string default. + void ClearNonDefaultToEmpty(); + + // Clears content, but keeps allocated std::string if arena != nullptr, to + // avoid the overhead of heap operations. After this returns, the content + // (as seen by the user) will always be equal to |default_value|. + void ClearToDefault(const LazyString& default_value, ::google::protobuf::Arena* arena); + + // Swaps internal pointers. Arena-safety semantics: this is guarded by the + // logic in Swap()/UnsafeArenaSwap() at the message level, so this method is + // 'unsafe' if called directly. + inline PROTOBUF_NDEBUG_INLINE static void InternalSwap(ArenaStringPtr* rhs, Arena* rhs_arena, ArenaStringPtr* lhs, Arena* lhs_arena); + + // Internal setter used only at parse time to directly set a donated string + // value. + void UnsafeSetTaggedPointer(TaggedStringPtr value) + { + tagged_ptr_ = value; + } + // Generated code only! An optimization, in certain cases the generated + // code is certain we can obtain a std::string with no default checks and + // tag tests. + std::string* UnsafeMutablePointer() PROTOBUF_RETURNS_NONNULL; + + // Returns true if this instances holds an immutable default value. + inline bool IsDefault() const + { + return tagged_ptr_.IsDefault(); + } + + private: + template + inline std::string* NewString(Arena* arena, Args&&... 
args) + { + if (arena == nullptr) + { + auto* s = new std::string(std::forward(args)...); + return tagged_ptr_.SetAllocated(s); + } + else + { + auto* s = Arena::Create(arena, std::forward(args)...); + return tagged_ptr_.SetMutableArena(s); + } + } + + TaggedStringPtr tagged_ptr_; + + bool IsFixedSizeArena() const + { + return false; + } + + // Swaps tagged pointer without debug hardening. This is to allow python + // protobuf to maintain pointer stability even in DEBUG builds. + inline PROTOBUF_NDEBUG_INLINE static void UnsafeShallowSwap( + ArenaStringPtr* rhs, ArenaStringPtr* lhs + ) + { + std::swap(lhs->tagged_ptr_, rhs->tagged_ptr_); + } + + friend class ::google::protobuf::internal::SwapFieldHelper; + friend class TcParser; + + // Slow paths. + + // MutableSlow requires that !IsString() || IsDefault + // Variadic to support 0 args for empty default and 1 arg for LazyString. + template + std::string* MutableSlow(::google::protobuf::Arena* arena, const Lazy&... lazy_default); + + friend class EpsCopyInputStream; + }; + + inline void ArenaStringPtr::InitDefault() + { + tagged_ptr_ = TaggedStringPtr(&fixed_address_empty_string); + } + + inline void ArenaStringPtr::InitExternal(const std::string* str) + { + tagged_ptr_.SetDefault(str); + } + + inline void ArenaStringPtr::InitAllocated(std::string* str, Arena* arena) + { + if (arena != nullptr) + { + tagged_ptr_.SetMutableArena(str); + arena->Own(str); + } + else + { + tagged_ptr_.SetAllocated(str); + } + } + + inline void ArenaStringPtr::Set(const char* s, Arena* arena) + { + Set(ConstStringParam{s}, arena); + } + + inline void ArenaStringPtr::Set(const char* s, size_t n, Arena* arena) + { + Set(ConstStringParam{s, n}, arena); + } + + inline void ArenaStringPtr::SetBytes(ConstStringParam value, Arena* arena) + { + Set(value, arena); + } + + inline void ArenaStringPtr::SetBytes(std::string&& value, Arena* arena) + { + Set(std::move(value), arena); + } + + inline void ArenaStringPtr::SetBytes(const char* s, Arena* 
arena) + { + Set(s, arena); + } + + inline void ArenaStringPtr::SetBytes(const void* p, size_t n, Arena* arena) + { + Set(ConstStringParam{static_cast(p), n}, arena); + } + + // Make sure rhs_arena allocated rhs, and lhs_arena allocated lhs. + inline PROTOBUF_NDEBUG_INLINE void ArenaStringPtr::InternalSwap( // + ArenaStringPtr* rhs, + Arena* rhs_arena, // + ArenaStringPtr* lhs, + Arena* lhs_arena + ) + { + // Silence unused variable warnings in release buildls. + (void)rhs_arena; + (void)lhs_arena; + std::swap(lhs->tagged_ptr_, rhs->tagged_ptr_); +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + auto force_realloc = [](ArenaStringPtr* p, Arena* arena) + { + if (p->IsDefault()) + return; + std::string* old_value = p->tagged_ptr_.Get(); + std::string* new_value = + p->IsFixedSizeArena() ? Arena::Create(arena, *old_value) : Arena::Create(arena, std::move(*old_value)); + if (arena == nullptr) + { + delete old_value; + p->tagged_ptr_.SetAllocated(new_value); + } + else + { + p->tagged_ptr_.SetMutableArena(new_value); + } + }; + // Because, at this point, tagged_ptr_ has been swapped, arena should also be + // swapped. + force_realloc(lhs, rhs_arena); + force_realloc(rhs, lhs_arena); +#endif // PROTOBUF_FORCE_COPY_IN_SWAP + } + + inline void ArenaStringPtr::ClearNonDefaultToEmpty() + { + // Unconditionally mask away the tag. 
+ tagged_ptr_.Get()->clear(); + } + + inline std::string* ArenaStringPtr::UnsafeMutablePointer() + { + GOOGLE_DCHECK(tagged_ptr_.IsMutable()); + GOOGLE_DCHECK(tagged_ptr_.Get() != nullptr); + return tagged_ptr_.Get(); + } + + } // namespace internal + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_ARENASTRING_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/arenaz_sampler.h b/CAPI/cpp/grpc/include/google/protobuf/arenaz_sampler.h new file mode 100644 index 00000000..de0c1291 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/arenaz_sampler.h @@ -0,0 +1,245 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_SRC_GOOGLE_PROTOBUF_ARENAZ_SAMPLER_H__ +#define GOOGLE_PROTOBUF_SRC_GOOGLE_PROTOBUF_ARENAZ_SAMPLER_H__ + +#include +#include +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + namespace internal + { + +#if defined(PROTOBUF_ARENAZ_SAMPLE) + struct ThreadSafeArenaStats; + void RecordResetSlow(ThreadSafeArenaStats* info); + void RecordAllocateSlow(ThreadSafeArenaStats* info, size_t requested, size_t allocated, size_t wasted); + // Stores information about a sampled thread safe arena. All mutations to this + // *must* be made through `Record*` functions below. All reads from this *must* + // only occur in the callback to `ThreadSafeArenazSampler::Iterate`. + struct ThreadSafeArenaStats : public absl::profiling_internal::Sample + { + // Constructs the object but does not fill in any fields. + ThreadSafeArenaStats(); + ~ThreadSafeArenaStats(); + + // Puts the object into a clean state, fills in the logically `const` members, + // blocking for any readers that are currently sampling the object. + void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + + // These fields are mutated by the various Record* APIs and need to be + // thread-safe. + std::atomic num_allocations; + std::atomic num_resets; + std::atomic bytes_requested; + std::atomic bytes_allocated; + std::atomic bytes_wasted; + // Records the largest size an arena ever had. 
Maintained across resets. + std::atomic max_bytes_allocated; + // Bit i when set to 1 indicates that a thread with tid % 63 = i accessed the + // underlying arena. The field is maintained across resets. + std::atomic thread_ids; + + // All of the fields below are set by `PrepareForSampling`, they must not + // be mutated in `Record*` functions. They are logically `const` in that + // sense. These are guarded by init_mu, but that is not externalized to + // clients, who can only read them during + // `ThreadSafeArenazSampler::Iterate` which will hold the lock. + static constexpr int kMaxStackDepth = 64; + int32_t depth; + void* stack[kMaxStackDepth]; + static void RecordAllocateStats(ThreadSafeArenaStats* info, size_t requested, size_t allocated, size_t wasted) + { + if (PROTOBUF_PREDICT_TRUE(info == nullptr)) + return; + RecordAllocateSlow(info, requested, allocated, wasted); + } + }; + + ThreadSafeArenaStats* SampleSlow(int64_t* next_sample); + void UnsampleSlow(ThreadSafeArenaStats* info); + + class ThreadSafeArenaStatsHandle + { + public: + explicit ThreadSafeArenaStatsHandle() = default; + explicit ThreadSafeArenaStatsHandle(ThreadSafeArenaStats* info) : + info_(info) + { + } + + ~ThreadSafeArenaStatsHandle() + { + if (PROTOBUF_PREDICT_TRUE(info_ == nullptr)) + return; + UnsampleSlow(info_); + } + + ThreadSafeArenaStatsHandle(ThreadSafeArenaStatsHandle&& other) noexcept + : + info_(absl::exchange(other.info_, nullptr)) + { + } + + ThreadSafeArenaStatsHandle& operator=( + ThreadSafeArenaStatsHandle&& other + ) noexcept + { + if (PROTOBUF_PREDICT_FALSE(info_ != nullptr)) + { + UnsampleSlow(info_); + } + info_ = absl::exchange(other.info_, nullptr); + return *this; + } + + void RecordReset() + { + if (PROTOBUF_PREDICT_TRUE(info_ == nullptr)) + return; + RecordResetSlow(info_); + } + + ThreadSafeArenaStats* MutableStats() + { + return info_; + } + + friend void swap(ThreadSafeArenaStatsHandle& lhs, ThreadSafeArenaStatsHandle& rhs) + { + std::swap(lhs.info_, 
rhs.info_); + } + + friend class ThreadSafeArenaStatsHandlePeer; + + private: + ThreadSafeArenaStats* info_ = nullptr; + }; + + using ThreadSafeArenazSampler = + ::absl::profiling_internal::SampleRecorder; + + extern PROTOBUF_THREAD_LOCAL int64_t global_next_sample; + + // Returns an RAII sampling handle that manages registration and unregistation + // with the global sampler. + inline ThreadSafeArenaStatsHandle Sample() + { + if (PROTOBUF_PREDICT_TRUE(--global_next_sample > 0)) + { + return ThreadSafeArenaStatsHandle(nullptr); + } + return ThreadSafeArenaStatsHandle(SampleSlow(&global_next_sample)); + } + +#else + struct ThreadSafeArenaStats + { + static void RecordAllocateStats(ThreadSafeArenaStats*, size_t /*requested*/, size_t /*allocated*/, size_t /*wasted*/) + { + } + }; + + ThreadSafeArenaStats* SampleSlow(int64_t* next_sample); + void UnsampleSlow(ThreadSafeArenaStats* info); + + class ThreadSafeArenaStatsHandle + { + public: + explicit ThreadSafeArenaStatsHandle() = default; + explicit ThreadSafeArenaStatsHandle(ThreadSafeArenaStats*) + { + } + + void RecordReset() + { + } + + ThreadSafeArenaStats* MutableStats() + { + return nullptr; + } + + friend void swap(ThreadSafeArenaStatsHandle&, ThreadSafeArenaStatsHandle&) + { + } + + private: + friend class ThreadSafeArenaStatsHandlePeer; + }; + + class ThreadSafeArenazSampler + { + public: + void Unregister(ThreadSafeArenaStats*) + { + } + void SetMaxSamples(int32_t) + { + } + }; + + // Returns an RAII sampling handle that manages registration and unregistation + // with the global sampler. + inline ThreadSafeArenaStatsHandle Sample() + { + return ThreadSafeArenaStatsHandle(nullptr); + } +#endif // defined(PROTOBUF_ARENAZ_SAMPLE) + + // Returns a global Sampler. + ThreadSafeArenazSampler& GlobalThreadSafeArenazSampler(); + + // Enables or disables sampling for thread safe arenas. + void SetThreadSafeArenazEnabled(bool enabled); + + // Sets the rate at which thread safe arena will be sampled. 
+ void SetThreadSafeArenazSampleParameter(int32_t rate); + + // Sets a soft max for the number of samples that will be kept. + void SetThreadSafeArenazMaxSamples(int32_t max); + + // Sets the current value for when arenas should be next sampled. + void SetThreadSafeArenazGlobalNextSample(int64_t next_sample); + + } // namespace internal + } // namespace protobuf +} // namespace google + +#include +#endif // GOOGLE_PROTOBUF_SRC_PROTOBUF_ARENAZ_SAMPLER_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/code_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/code_generator.h new file mode 100644 index 00000000..ab9bad03 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/code_generator.h @@ -0,0 +1,218 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Defines the abstract interface implemented by each of the language-specific +// code generators. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__ + +#include +#include +#include +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + + namespace io + { + class ZeroCopyOutputStream; + } + class FileDescriptor; + class GeneratedCodeInfo; + + namespace compiler + { + class AccessInfoMap; + + class Version; + + // Defined in this file. + class CodeGenerator; + class GeneratorContext; + + // The abstract interface to a class which generates code implementing a + // particular proto file in a particular language. A number of these may + // be registered with CommandLineInterface to support various languages. + class PROTOC_EXPORT CodeGenerator + { + public: + inline CodeGenerator() + { + } + virtual ~CodeGenerator(); + + // Generates code for the given proto file, generating one or more files in + // the given output directory. + // + // A parameter to be passed to the generator can be specified on the command + // line. This is intended to be used to pass generator specific parameters. + // It is empty if no parameter was given. 
ParseGeneratorParameter (below), + // can be used to accept multiple parameters within the single parameter + // command line flag. + // + // Returns true if successful. Otherwise, sets *error to a description of + // the problem (e.g. "invalid parameter") and returns false. + virtual bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* generator_context, std::string* error) const = 0; + + // Generates code for all given proto files. + // + // WARNING: The canonical code generator design produces one or two output + // files per input .proto file, and we do not wish to encourage alternate + // designs. + // + // A parameter is given as passed on the command line, as in |Generate()| + // above. + // + // Returns true if successful. Otherwise, sets *error to a description of + // the problem (e.g. "invalid parameter") and returns false. + virtual bool GenerateAll(const std::vector& files, const std::string& parameter, GeneratorContext* generator_context, std::string* error) const; + + // This must be kept in sync with plugin.proto. See that file for + // documentation on each value. + enum Feature + { + FEATURE_PROTO3_OPTIONAL = 1, + }; + + // Implement this to indicate what features this code generator supports. + // + // This must be a bitwise OR of values from the Feature enum above (or zero). + virtual uint64_t GetSupportedFeatures() const + { + return 0; + } + + // This is no longer used, but this class is part of the opensource protobuf + // library, so it has to remain to keep vtables the same for the current + // version of the library. When protobufs does a api breaking change, the + // method can be removed. + virtual bool HasGenerateAll() const + { + return true; + } + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodeGenerator); + }; + + // CodeGenerators generate one or more files in a given directory. 
This + // abstract interface represents the directory to which the CodeGenerator is + // to write and other information about the context in which the Generator + // runs. + class PROTOC_EXPORT GeneratorContext + { + public: + inline GeneratorContext() + { + } + virtual ~GeneratorContext(); + + // Opens the given file, truncating it if it exists, and returns a + // ZeroCopyOutputStream that writes to the file. The caller takes ownership + // of the returned object. This method never fails (a dummy stream will be + // returned instead). + // + // The filename given should be relative to the root of the source tree. + // E.g. the C++ generator, when generating code for "foo/bar.proto", will + // generate the files "foo/bar.pb.h" and "foo/bar.pb.cc"; note that + // "foo/" is included in these filenames. The filename is not allowed to + // contain "." or ".." components. + virtual io::ZeroCopyOutputStream* Open(const std::string& filename) = 0; + + // Similar to Open() but the output will be appended to the file if exists + virtual io::ZeroCopyOutputStream* OpenForAppend(const std::string& filename); + + // Creates a ZeroCopyOutputStream which will insert code into the given file + // at the given insertion point. See plugin.proto (plugin.pb.h) for more + // information on insertion points. The default implementation + // assert-fails -- it exists only for backwards-compatibility. + // + // WARNING: This feature is currently EXPERIMENTAL and is subject to change. + virtual io::ZeroCopyOutputStream* OpenForInsert( + const std::string& filename, const std::string& insertion_point + ); + + // Similar to OpenForInsert, but if `info` is non-empty, will open (or create) + // filename.pb.meta and insert info at the appropriate place with the + // necessary shifts. The default implementation ignores `info`. + // + // WARNING: This feature will be REMOVED in the near future. 
+ virtual io::ZeroCopyOutputStream* OpenForInsertWithGeneratedCodeInfo( + const std::string& filename, const std::string& insertion_point, const google::protobuf::GeneratedCodeInfo& info + ); + + // Returns a vector of FileDescriptors for all the files being compiled + // in this run. Useful for languages, such as Go, that treat files + // differently when compiled as a set rather than individually. + virtual void ListParsedFiles(std::vector* output); + + // Retrieves the version number of the protocol compiler associated with + // this GeneratorContext. + virtual void GetCompilerVersion(Version* version) const; + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GeneratorContext); + }; + + // The type GeneratorContext was once called OutputDirectory. This typedef + // provides backward compatibility. + typedef GeneratorContext OutputDirectory; + + // Several code generators treat the parameter argument as holding a + // list of options separated by commas. This helper function parses + // a set of comma-delimited name/value pairs: e.g., + // "foo=bar,baz,moo=corge" + // parses to the pairs: + // ("foo", "bar"), ("baz", ""), ("moo", "corge") + PROTOC_EXPORT void ParseGeneratorParameter( + const std::string&, std::vector>* + ); + + // Strips ".proto" or ".protodevel" from the end of a filename. + PROTOC_EXPORT std::string StripProto(const std::string& filename); + + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/command_line_interface.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/command_line_interface.h new file mode 100644 index 00000000..cc33f525 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/command_line_interface.h @@ -0,0 +1,466 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Implements the Protocol Compiler front-end such that it may be reused by +// custom compilers written to support other languages. 
+ +#ifndef GOOGLE_PROTOBUF_COMPILER_COMMAND_LINE_INTERFACE_H__ +#define GOOGLE_PROTOBUF_COMPILER_COMMAND_LINE_INTERFACE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + + class Descriptor; // descriptor.h + class DescriptorDatabase; // descriptor_database.h + class DescriptorPool; // descriptor.h + class FileDescriptor; // descriptor.h + class FileDescriptorSet; // descriptor.h + class FileDescriptorProto; // descriptor.pb.h + template + class RepeatedPtrField; // repeated_field.h + class SimpleDescriptorDatabase; // descriptor_database.h + + namespace compiler + { + + class CodeGenerator; // code_generator.h + class GeneratorContext; // code_generator.h + class DiskSourceTree; // importer.h + + // This class implements the command-line interface to the protocol compiler. + // It is designed to make it very easy to create a custom protocol compiler + // supporting the languages of your choice. For example, if you wanted to + // create a custom protocol compiler binary which includes both the regular + // C++ support plus support for your own custom output "Foo", you would + // write a class "FooGenerator" which implements the CodeGenerator interface, + // then write a main() procedure like this: + // + // int main(int argc, char* argv[]) { + // google::protobuf::compiler::CommandLineInterface cli; + // + // // Support generation of C++ source and headers. + // google::protobuf::compiler::cpp::CppGenerator cpp_generator; + // cli.RegisterGenerator("--cpp_out", &cpp_generator, + // "Generate C++ source and header."); + // + // // Support generation of Foo code. 
+ // FooGenerator foo_generator; + // cli.RegisterGenerator("--foo_out", &foo_generator, + // "Generate Foo file."); + // + // return cli.Run(argc, argv); + // } + // + // The compiler is invoked with syntax like: + // protoc --cpp_out=outdir --foo_out=outdir --proto_path=src src/foo.proto + // + // The .proto file to compile can be specified on the command line using either + // its physical file path, or a virtual path relative to a directory specified + // in --proto_path. For example, for src/foo.proto, the following two protoc + // invocations work the same way: + // 1. protoc --proto_path=src src/foo.proto (physical file path) + // 2. protoc --proto_path=src foo.proto (virtual path relative to src) + // + // If a file path can be interpreted both as a physical file path and as a + // relative virtual path, the physical file path takes precedence. + // + // For a full description of the command-line syntax, invoke it with --help. + class PROTOC_EXPORT CommandLineInterface + { + public: + static const char* const kPathSeparator; + + CommandLineInterface(); + ~CommandLineInterface(); + + // Register a code generator for a language. + // + // Parameters: + // * flag_name: The command-line flag used to specify an output file of + // this type. The name must start with a '-'. If the name is longer + // than one letter, it must start with two '-'s. + // * generator: The CodeGenerator which will be called to generate files + // of this type. + // * help_text: Text describing this flag in the --help output. + // + // Some generators accept extra parameters. You can specify this parameter + // on the command-line by placing it before the output directory, separated + // by a colon: + // protoc --foo_out=enable_bar:outdir + // The text before the colon is passed to CodeGenerator::Generate() as the + // "parameter". 
+ void RegisterGenerator(const std::string& flag_name, CodeGenerator* generator, const std::string& help_text); + + // Register a code generator for a language. + // Besides flag_name you can specify another option_flag_name that could be + // used to pass extra parameters to the registered code generator. + // Suppose you have registered a generator by calling: + // command_line_interface.RegisterGenerator("--foo_out", "--foo_opt", ...) + // Then you could invoke the compiler with a command like: + // protoc --foo_out=enable_bar:outdir --foo_opt=enable_baz + // This will pass "enable_bar,enable_baz" as the parameter to the generator. + void RegisterGenerator(const std::string& flag_name, const std::string& option_flag_name, CodeGenerator* generator, const std::string& help_text); + + // Enables "plugins". In this mode, if a command-line flag ends with "_out" + // but does not match any registered generator, the compiler will attempt to + // find a "plugin" to implement the generator. Plugins are just executables. + // They should live somewhere in the PATH. + // + // The compiler determines the executable name to search for by concatenating + // exe_name_prefix with the unrecognized flag name, removing "_out". So, for + // example, if exe_name_prefix is "protoc-" and you pass the flag --foo_out, + // the compiler will try to run the program "protoc-gen-foo". + // + // The plugin program should implement the following usage: + // plugin [--out=OUTDIR] [--parameter=PARAMETER] PROTO_FILES < DESCRIPTORS + // --out indicates the output directory (as passed to the --foo_out + // parameter); if omitted, the current directory should be used. --parameter + // gives the generator parameter, if any was provided (see below). The + // PROTO_FILES list the .proto files which were given on the compiler + // command-line; these are the files for which the plugin is expected to + // generate output code. 
Finally, DESCRIPTORS is an encoded FileDescriptorSet + // (as defined in descriptor.proto). This is piped to the plugin's stdin. + // The set will include descriptors for all the files listed in PROTO_FILES as + // well as all files that they import. The plugin MUST NOT attempt to read + // the PROTO_FILES directly -- it must use the FileDescriptorSet. + // + // The plugin should generate whatever files are necessary, as code generators + // normally do. It should write the names of all files it generates to + // stdout. The names should be relative to the output directory, NOT absolute + // names or relative to the current directory. If any errors occur, error + // messages should be written to stderr. If an error is fatal, the plugin + // should exit with a non-zero exit code. + // + // Plugins can have generator parameters similar to normal built-in + // generators. Extra generator parameters can be passed in via a matching + // "_opt" parameter. For example: + // protoc --plug_out=enable_bar:outdir --plug_opt=enable_baz + // This will pass "enable_bar,enable_baz" as the parameter to the plugin. + // + void AllowPlugins(const std::string& exe_name_prefix); + + // Run the Protocol Compiler with the given command-line parameters. + // Returns the error code which should be returned by main(). + // + // It may not be safe to call Run() in a multi-threaded environment because + // it calls strerror(). I'm not sure why you'd want to do this anyway. + int Run(int argc, const char* const argv[]); + + // DEPRECATED. Calling this method has no effect. Protocol compiler now + // always try to find the .proto file relative to the current directory + // first and if the file is not found, it will then treat the input path + // as a virtual path. + void SetInputsAreProtoPathRelative(bool /* enable */) + { + } + + // Provides some text which will be printed when the --version flag is + // used. 
The version of libprotoc will also be printed on the next line + // after this text. + void SetVersionInfo(const std::string& text) + { + version_info_ = text; + } + + private: + // ----------------------------------------------------------------- + + class ErrorPrinter; + class GeneratorContextImpl; + class MemoryOutputStream; + typedef std::unordered_map> + GeneratorContextMap; + + // Clear state from previous Run(). + void Clear(); + + // Remaps the proto file so that it is relative to one of the directories + // in proto_path_. Returns false if an error occurred. + bool MakeProtoProtoPathRelative(DiskSourceTree* source_tree, std::string* proto, DescriptorDatabase* fallback_database); + + // Remaps each file in input_files_ so that it is relative to one of the + // directories in proto_path_. Returns false if an error occurred. + bool MakeInputsBeProtoPathRelative(DiskSourceTree* source_tree, DescriptorDatabase* fallback_database); + + // Fails if these files use proto3 optional and the code generator doesn't + // support it. This is a permanent check. + bool EnforceProto3OptionalSupport( + const std::string& codegen_name, uint64_t supported_features, const std::vector& parsed_files + ) const; + + // Return status for ParseArguments() and InterpretArgument(). + enum ParseArgumentStatus + { + PARSE_ARGUMENT_DONE_AND_CONTINUE, + PARSE_ARGUMENT_DONE_AND_EXIT, + PARSE_ARGUMENT_FAIL + }; + + // Parse all command-line arguments. + ParseArgumentStatus ParseArguments(int argc, const char* const argv[]); + + // Read an argument file and append the file's content to the list of + // arguments. Return false if the file cannot be read. + bool ExpandArgumentFile(const std::string& file, std::vector* arguments); + + // Parses a command-line argument into a name/value pair. Returns + // true if the next argument in the argv should be used as the value, + // false otherwise. 
+ // + // Examples: + // "-Isrc/protos" -> + // name = "-I", value = "src/protos" + // "--cpp_out=src/foo.pb2.cc" -> + // name = "--cpp_out", value = "src/foo.pb2.cc" + // "foo.proto" -> + // name = "", value = "foo.proto" + bool ParseArgument(const char* arg, std::string* name, std::string* value); + + // Interprets arguments parsed with ParseArgument. + ParseArgumentStatus InterpretArgument(const std::string& name, const std::string& value); + + // Print the --help text to stderr. + void PrintHelpText(); + + // Loads proto_path_ into the provided source_tree. + bool InitializeDiskSourceTree(DiskSourceTree* source_tree, DescriptorDatabase* fallback_database); + + // Verify that all the input files exist in the given database. + bool VerifyInputFilesInDescriptors(DescriptorDatabase* fallback_database); + + // Parses input_files_ into parsed_files + bool ParseInputFiles(DescriptorPool* descriptor_pool, DiskSourceTree* source_tree, std::vector* parsed_files); + + // Generate the given output file from the given input. + struct OutputDirective; // see below + bool GenerateOutput(const std::vector& parsed_files, const OutputDirective& output_directive, GeneratorContext* generator_context); + bool GeneratePluginOutput( + const std::vector& parsed_files, + const std::string& plugin_name, + const std::string& parameter, + GeneratorContext* generator_context, + std::string* error + ); + + // Implements --encode and --decode. + bool EncodeOrDecode(const DescriptorPool* pool); + + // Implements the --descriptor_set_out option. + bool WriteDescriptorSet( + const std::vector& parsed_files + ); + + // Implements the --dependency_out option + bool GenerateDependencyManifestFile( + const std::vector& parsed_files, + const GeneratorContextMap& output_directories, + DiskSourceTree* source_tree + ); + + // Get all transitive dependencies of the given file (including the file + // itself), adding them to the given list of FileDescriptorProtos. 
The + // protos will be ordered such that every file is listed before any file that + // depends on it, so that you can call DescriptorPool::BuildFile() on them + // in order. Any files in *already_seen will not be added, and each file + // added will be inserted into *already_seen. If include_source_code_info is + // true then include the source code information in the FileDescriptorProtos. + // If include_json_name is true, populate the json_name field of + // FieldDescriptorProto for all fields. + static void GetTransitiveDependencies( + const FileDescriptor* file, bool include_json_name, bool include_source_code_info, std::set* already_seen, RepeatedPtrField* output + ); + + // Implements the --print_free_field_numbers. This function prints free field + // numbers into stdout for the message and it's nested message types in + // post-order, i.e. nested types first. Printed range are left-right + // inclusive, i.e. [a, b]. + // + // Groups: + // For historical reasons, groups are considered to share the same + // field number space with the parent message, thus it will not print free + // field numbers for groups. The field numbers used in the groups are + // excluded in the free field numbers of the parent message. + // + // Extension Ranges: + // Extension ranges are considered ocuppied field numbers and they will not be + // listed as free numbers in the output. + void PrintFreeFieldNumbers(const Descriptor* descriptor); + + // ----------------------------------------------------------------- + + // The name of the executable as invoked (i.e. argv[0]). + std::string executable_name_; + + // Version info set with SetVersionInfo(). + std::string version_info_; + + // Registered generators. 
+ struct GeneratorInfo + { + std::string flag_name; + std::string option_flag_name; + CodeGenerator* generator; + std::string help_text; + }; + typedef std::map GeneratorMap; + GeneratorMap generators_by_flag_name_; + GeneratorMap generators_by_option_name_; + // A map from generator names to the parameters specified using the option + // flag. For example, if the user invokes the compiler with: + // protoc --foo_out=outputdir --foo_opt=enable_bar ... + // Then there will be an entry ("--foo_out", "enable_bar") in this map. + std::map generator_parameters_; + // Similar to generator_parameters_, but stores the parameters for plugins. + std::map plugin_parameters_; + + // See AllowPlugins(). If this is empty, plugins aren't allowed. + std::string plugin_prefix_; + + // Maps specific plugin names to files. When executing a plugin, this map + // is searched first to find the plugin executable. If not found here, the + // PATH (or other OS-specific search strategy) is searched. + std::map plugins_; + + // Stuff parsed from command line. + enum Mode + { + MODE_COMPILE, // Normal mode: parse .proto files and compile them. + MODE_ENCODE, // --encode: read text from stdin, write binary to stdout. + MODE_DECODE, // --decode: read binary from stdin, write text to stdout. + MODE_PRINT, // Print mode: print info of the given .proto files and exit. + }; + + Mode mode_ = MODE_COMPILE; + + enum PrintMode + { + PRINT_NONE, // Not in MODE_PRINT + PRINT_FREE_FIELDS, // --print_free_fields + }; + + PrintMode print_mode_ = PRINT_NONE; + + enum ErrorFormat + { + ERROR_FORMAT_GCC, // GCC error output format (default). + ERROR_FORMAT_MSVS // Visual Studio output (--error_format=msvs). + }; + + ErrorFormat error_format_ = ERROR_FORMAT_GCC; + + // True if we should treat warnings as errors that fail the compilation. + bool fatal_warnings_ = false; + + std::vector> + proto_path_; // Search path for proto files. + std::vector input_files_; // Names of the input proto files. 
+ + // Names of proto files which are allowed to be imported. Used by build + // systems to enforce depend-on-what-you-import. + std::set direct_dependencies_; + bool direct_dependencies_explicitly_set_ = false; + + // If there's a violation of depend-on-what-you-import, this string will be + // presented to the user. "%s" will be replaced with the violating import. + std::string direct_dependencies_violation_msg_; + + // output_directives_ lists all the files we are supposed to output and what + // generator to use for each. + struct OutputDirective + { + std::string name; // E.g. "--foo_out" + CodeGenerator* generator; // NULL for plugins + std::string parameter; + std::string output_location; + }; + std::vector output_directives_; + + // When using --encode or --decode, this names the type we are encoding or + // decoding. (Empty string indicates --decode_raw.) + std::string codec_type_; + + // If --descriptor_set_in was given, these are filenames containing + // parsed FileDescriptorSets to be used for loading protos. Otherwise, empty. + std::vector descriptor_set_in_names_; + + // If --descriptor_set_out was given, this is the filename to which the + // FileDescriptorSet should be written. Otherwise, empty. + std::string descriptor_set_out_name_; + + // If --dependency_out was given, this is the path to the file where the + // dependency file will be written. Otherwise, empty. + std::string dependency_out_name_; + + // True if --include_imports was given, meaning that we should + // write all transitive dependencies to the DescriptorSet. Otherwise, only + // the .proto files listed on the command-line are added. + bool imports_in_descriptor_set_; + + // True if --include_source_info was given, meaning that we should not strip + // SourceCodeInfo from the DescriptorSet. + bool source_info_in_descriptor_set_ = false; + + // Was the --disallow_services flag used? 
+ bool disallow_services_ = false; + + // When using --encode, this will be passed to SetSerializationDeterministic. + bool deterministic_output_ = false; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CommandLineInterface); + }; + + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_COMMAND_LINE_INTERFACE_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/cpp_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/cpp_generator.h new file mode 100644 index 00000000..1716ab20 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/cpp_generator.h @@ -0,0 +1,6 @@ +#ifndef GOOGLE_PROTOBUF_COMPILER_CPP_CPP_GENERATOR_H_ +#define GOOGLE_PROTOBUF_COMPILER_CPP_CPP_GENERATOR_H_ + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_CPP_CPP_GENERATOR_H_ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/file.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/file.h new file mode 100644 index 00000000..85951239 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/file.h @@ -0,0 +1,219 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CPP_FILE_H__ +#define GOOGLE_PROTOBUF_COMPILER_CPP_FILE_H__ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace google +{ + namespace protobuf + { + class FileDescriptor; // descriptor.h + namespace io + { + class Printer; // printer.h + } + } // namespace protobuf +} // namespace google + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace cpp + { + + class EnumGenerator; // enum.h + class MessageGenerator; // message.h + class ServiceGenerator; // service.h + class ExtensionGenerator; // extension.h + + class FileGenerator + { + public: + // See generator.cc for the meaning of dllexport_decl. + FileGenerator(const FileDescriptor* file, const Options& options); + ~FileGenerator(); + + // Shared code between the two header generators below. 
+ void GenerateHeader(io::Printer* printer); + + // info_path, if non-empty, should be the path (relative to printer's + // output) to the metadata file describing this proto header. + void GenerateProtoHeader(io::Printer* printer, const std::string& info_path); + // info_path, if non-empty, should be the path (relative to printer's + // output) to the metadata file describing this PB header. + void GeneratePBHeader(io::Printer* printer, const std::string& info_path); + void GenerateSource(io::Printer* printer); + + // The following member functions are used when the lite_implicit_weak_fields + // option is set. In this mode the code is organized a bit differently to + // promote better linker stripping of unused code. In particular, we generate + // one .cc file per message, one .cc file per extension, and a main pb.cc file + // containing everything else. + + int NumMessages() const + { + return message_generators_.size(); + } + int NumExtensions() const + { + return extension_generators_.size(); + } + // Generates the source file for one message. + void GenerateSourceForMessage(int idx, io::Printer* printer); + // Generates the source file for one extension. + void GenerateSourceForExtension(int idx, io::Printer* printer); + // Generates a source file containing everything except messages and + // extensions. + void GenerateGlobalSource(io::Printer* printer); + + private: + // Internal type used by GenerateForwardDeclarations (defined in file.cc). 
+ class ForwardDeclarations; + struct CrossFileReferences; + + void IncludeFile(const std::string& google3_name, io::Printer* printer) + { + DoIncludeFile(google3_name, false, printer); + } + void IncludeFileAndExport(const std::string& google3_name, io::Printer* printer) + { + DoIncludeFile(google3_name, true, printer); + } + void DoIncludeFile(const std::string& google3_name, bool do_export, io::Printer* printer); + + std::string CreateHeaderInclude(const std::string& basename, const FileDescriptor* file); + void GetCrossFileReferencesForField(const FieldDescriptor* field, CrossFileReferences* refs); + void GetCrossFileReferencesForFile(const FileDescriptor* file, CrossFileReferences* refs); + void GenerateInternalForwardDeclarations(const CrossFileReferences& refs, io::Printer* printer); + void GenerateSourceIncludes(io::Printer* printer); + void GenerateSourcePrelude(io::Printer* printer); + void GenerateSourceDefaultInstance(int idx, io::Printer* printer); + + void GenerateInitForSCC(const SCC* scc, const CrossFileReferences& refs, io::Printer* printer); + void GenerateReflectionInitializationCode(io::Printer* printer); + + // For other imports, generates their forward-declarations. + void GenerateForwardDeclarations(io::Printer* printer); + + // Generates top or bottom of a header file. + void GenerateTopHeaderGuard(io::Printer* printer, bool pb_h); + void GenerateBottomHeaderGuard(io::Printer* printer, bool pb_h); + + // Generates #include directives. + void GenerateLibraryIncludes(io::Printer* printer); + void GenerateDependencyIncludes(io::Printer* printer); + + // Generate a pragma to pull in metadata using the given info_path (if + // non-empty). info_path should be relative to printer's output. + void GenerateMetadataPragma(io::Printer* printer, const std::string& info_path); + + // Generates a couple of different pieces before definitions: + void GenerateGlobalStateFunctionDeclarations(io::Printer* printer); + + // Generates types for classes. 
+ void GenerateMessageDefinitions(io::Printer* printer); + + void GenerateEnumDefinitions(io::Printer* printer); + + // Generates generic service definitions. + void GenerateServiceDefinitions(io::Printer* printer); + + // Generates extension identifiers. + void GenerateExtensionIdentifiers(io::Printer* printer); + + // Generates inline function definitions. + void GenerateInlineFunctionDefinitions(io::Printer* printer); + + void GenerateProto2NamespaceEnumSpecializations(io::Printer* printer); + + // Sometimes the names we use in a .proto file happen to be defined as + // macros on some platforms (e.g., macro/minor used in plugin.proto are + // defined as macros in sys/types.h on FreeBSD and a few other platforms). + // To make the generated code compile on these platforms, we either have to + // undef the macro for these few platforms, or rename the field name for all + // platforms. Since these names are part of protobuf public API, renaming is + // generally a breaking change so we prefer the #undef approach. + void GenerateMacroUndefs(io::Printer* printer); + + bool IsDepWeak(const FileDescriptor* dep) const + { + if (weak_deps_.count(dep) != 0) + { + GOOGLE_CHECK(!options_.opensource_runtime); + return true; + } + return false; + } + + std::set weak_deps_; + + const FileDescriptor* file_; + const Options options_; + + MessageSCCAnalyzer scc_analyzer_; + + std::map variables_; + + // Contains the post-order walk of all the messages (and child messages) in + // this file. If you need a pre-order walk just reverse iterate. 
+ std::vector> message_generators_; + std::vector> enum_generators_; + std::vector> service_generators_; + std::vector> extension_generators_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FileGenerator); + }; + + } // namespace cpp + } // namespace compiler + } // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_COMPILER_CPP_FILE_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/generator.h new file mode 100644 index 00000000..c23a6ebc --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/generator.h @@ -0,0 +1,114 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Generates C++ code for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CPP_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_CPP_GENERATOR_H__ + +#include +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace cpp + { + + // CodeGenerator implementation which generates a C++ source file and + // header. If you create your own protocol compiler binary and you want + // it to support C++ output, you can do so by registering an instance of this + // CodeGenerator with the CommandLineInterface in your main() function. + class PROTOC_EXPORT CppGenerator : public CodeGenerator + { + public: + CppGenerator(); + ~CppGenerator() override; + + enum class Runtime + { + kGoogle3, // Use the internal google3 runtime. + kOpensource, // Use the open-source runtime. + + // Use the open-source runtime with google3 #include paths. 
We make these + // absolute to avoid ambiguity, so the runtime will be #included like: + // #include "third_party/protobuf/.../google/protobuf/message.h" + kOpensourceGoogle3 + }; + + void set_opensource_runtime(bool opensource) + { + opensource_runtime_ = opensource; + } + + // If set to a non-empty string, generated code will do: + // #include "/google/protobuf/message.h" + // instead of: + // #include + // This has no effect if opensource_runtime = false. + void set_runtime_include_base(const std::string& base) + { + runtime_include_base_ = base; + } + + // implements CodeGenerator ---------------------------------------- + bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* generator_context, std::string* error) const override; + + uint64_t GetSupportedFeatures() const override + { + // We don't fully support this yet, but this is needed to unblock the tests, + // and we will have full support before the experimental flag is removed. + return FEATURE_PROTO3_OPTIONAL; + } + + private: + bool opensource_runtime_ = true; + std::string runtime_include_base_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CppGenerator); + }; + + } // namespace cpp + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_CPP_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/helpers.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/helpers.h new file mode 100644 index 00000000..54ecb711 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/helpers.h @@ -0,0 +1,1187 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CPP_HELPERS_H__ +#define GOOGLE_PROTOBUF_COMPILER_CPP_HELPERS_H__ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Must be included last. 
+#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace cpp + { + + enum class ArenaDtorNeeds + { + kNone = 0, + kOnDemand = 1, + kRequired = 2 + }; + + inline std::string ProtobufNamespace(const Options& /* options */) + { + return "PROTOBUF_NAMESPACE_ID"; + } + + inline std::string MacroPrefix(const Options& /* options */) + { + return "GOOGLE_PROTOBUF"; + } + + inline std::string DeprecatedAttribute(const Options& /* options */, const FieldDescriptor* d) + { + return d->options().deprecated() ? "PROTOBUF_DEPRECATED " : ""; + } + + inline std::string DeprecatedAttribute(const Options& /* options */, const EnumValueDescriptor* d) + { + return d->options().deprecated() ? "PROTOBUF_DEPRECATED_ENUM " : ""; + } + + // Commonly-used separator comments. Thick is a line of '=', thin is a line + // of '-'. + extern const char kThickSeparator[]; + extern const char kThinSeparator[]; + + void SetCommonVars(const Options& options, std::map* variables); + + // Variables to access message data from the message scope. + void SetCommonMessageDataVariables( + const Descriptor* descriptor, + std::map* variables + ); + + void SetUnknownFieldsVariable(const Descriptor* descriptor, const Options& options, std::map* variables); + + bool GetBootstrapBasename(const Options& options, const std::string& basename, std::string* bootstrap_basename); + bool MaybeBootstrap(const Options& options, GeneratorContext* generator_context, bool bootstrap_flag, std::string* basename); + bool IsBootstrapProto(const Options& options, const FileDescriptor* file); + + // Name space of the proto file. This namespace is such that the string + // "::some_name" is the correct fully qualified namespace. + // This means if the package is empty the namespace is "", and otherwise + // the namespace is "::foo::bar::...::baz" without trailing semi-colons. 
+ std::string Namespace(const FileDescriptor* d, const Options& options); + std::string Namespace(const Descriptor* d, const Options& options); + std::string Namespace(const FieldDescriptor* d, const Options& options); + std::string Namespace(const EnumDescriptor* d, const Options& options); + + // Returns true if it's safe to reset "field" to zero. + bool CanInitializeByZeroing(const FieldDescriptor* field); + + std::string ClassName(const Descriptor* descriptor); + std::string ClassName(const EnumDescriptor* enum_descriptor); + + std::string QualifiedClassName(const Descriptor* d, const Options& options); + std::string QualifiedClassName(const EnumDescriptor* d, const Options& options); + + std::string QualifiedClassName(const Descriptor* d); + std::string QualifiedClassName(const EnumDescriptor* d); + + // DEPRECATED just use ClassName or QualifiedClassName, a boolean is very + // unreadable at the callsite. + // Returns the non-nested type name for the given type. If "qualified" is + // true, prefix the type with the full namespace. For example, if you had: + // package foo.bar; + // message Baz { message Moo {} } + // Then the qualified ClassName for Moo would be: + // ::foo::bar::Baz_Moo + // While the non-qualified version would be: + // Baz_Moo + inline std::string ClassName(const Descriptor* descriptor, bool qualified) + { + return qualified ? QualifiedClassName(descriptor, Options()) : ClassName(descriptor); + } + + inline std::string ClassName(const EnumDescriptor* descriptor, bool qualified) + { + return qualified ? QualifiedClassName(descriptor, Options()) : ClassName(descriptor); + } + + // Returns the extension name prefixed with the class name if nested but without + // the package name. + std::string ExtensionName(const FieldDescriptor* d); + + std::string QualifiedExtensionName(const FieldDescriptor* d, const Options& options); + std::string QualifiedExtensionName(const FieldDescriptor* d); + + // Type name of default instance. 
+ std::string DefaultInstanceType(const Descriptor* descriptor, const Options& options, bool split = false); + + // Non-qualified name of the default_instance of this message. + std::string DefaultInstanceName(const Descriptor* descriptor, const Options& options, bool split = false); + + // Non-qualified name of the default instance pointer. This is used only for + // implicit weak fields, where we need an extra indirection. + std::string DefaultInstancePtr(const Descriptor* descriptor, const Options& options, bool split = false); + + // Fully qualified name of the default_instance of this message. + std::string QualifiedDefaultInstanceName(const Descriptor* descriptor, const Options& options, bool split = false); + + // Fully qualified name of the default instance pointer. + std::string QualifiedDefaultInstancePtr(const Descriptor* descriptor, const Options& options, bool split = false); + + // DescriptorTable variable name. + std::string DescriptorTableName(const FileDescriptor* file, const Options& options); + + // When declaring symbol externs from another file, this macro will supply the + // dllexport needed for the target file, if any. + std::string FileDllExport(const FileDescriptor* file, const Options& options); + + // Name of the base class: google::protobuf::Message or google::protobuf::MessageLite. + std::string SuperClassName(const Descriptor* descriptor, const Options& options); + + // Adds an underscore if necessary to prevent conflicting with a keyword. + std::string ResolveKeyword(const std::string& name); + + // Get the (unqualified) name that should be used for this field in C++ code. + // The name is coerced to lower-case to emulate proto1 behavior. People + // should be using lowercase-with-underscores style for proto field names + // anyway, so normally this just returns field->name(). + std::string FieldName(const FieldDescriptor* field); + + // Returns the (unqualified) private member name for this field in C++ code. 
+ std::string FieldMemberName(const FieldDescriptor* field, bool split); + + // Returns an estimate of the compiler's alignment for the field. This + // can't guarantee to be correct because the generated code could be compiled on + // different systems with different alignment rules. The estimates below assume + // 64-bit pointers. + int EstimateAlignmentSize(const FieldDescriptor* field); + + // Get the unqualified name that should be used for a field's field + // number constant. + std::string FieldConstantName(const FieldDescriptor* field); + + // Returns the scope where the field was defined (for extensions, this is + // different from the message type to which the field applies). + inline const Descriptor* FieldScope(const FieldDescriptor* field) + { + return field->is_extension() ? field->extension_scope() : field->containing_type(); + } + + // Returns the fully-qualified type name field->message_type(). Usually this + // is just ClassName(field->message_type(), true); + std::string FieldMessageTypeName(const FieldDescriptor* field, const Options& options); + + // Get the C++ type name for a primitive type (e.g. "double", "::google::protobuf::int32", etc.). + const char* PrimitiveTypeName(FieldDescriptor::CppType type); + std::string PrimitiveTypeName(const Options& options, FieldDescriptor::CppType type); + + // Get the declared type name in CamelCase format, as is used e.g. for the + // methods of WireFormat. For example, TYPE_INT32 becomes "Int32". + const char* DeclaredTypeMethodName(FieldDescriptor::Type type); + + // Return the code that evaluates to the number when compiled. + std::string Int32ToString(int number); + + // Get code that evaluates to the field's default value. + std::string DefaultValue(const Options& options, const FieldDescriptor* field); + + // Compatibility function for callers outside proto2. + std::string DefaultValue(const FieldDescriptor* field); + + // Convert a file name into a valid identifier. 
                // Convert a file name into a valid identifier (e.g. for use in a macro or
                // a C++ symbol derived from the file name).
                std::string FilenameIdentifier(const std::string& filename);

                // For each .proto file generates a unique name. To prevent collisions of
                // symbols in the global namespace
                std::string UniqueName(const std::string& name, const std::string& filename, const Options& options);
                // Convenience overloads that derive the file name from a descriptor.
                inline std::string UniqueName(const std::string& name, const FileDescriptor* d, const Options& options)
                {
                    return UniqueName(name, d->name(), options);
                }
                inline std::string UniqueName(const std::string& name, const Descriptor* d, const Options& options)
                {
                    return UniqueName(name, d->file(), options);
                }
                inline std::string UniqueName(const std::string& name, const EnumDescriptor* d, const Options& options)
                {
                    return UniqueName(name, d->file(), options);
                }
                inline std::string UniqueName(const std::string& name, const ServiceDescriptor* d, const Options& options)
                {
                    return UniqueName(name, d->file(), options);
                }

                // Versions for call sites that only support the internal runtime (like proto1
                // support). These force opensource_runtime = false.
                inline Options InternalRuntimeOptions()
                {
                    Options options;
                    options.opensource_runtime = false;
                    return options;
                }
                inline std::string UniqueName(const std::string& name, const std::string& filename)
                {
                    return UniqueName(name, filename, InternalRuntimeOptions());
                }
                inline std::string UniqueName(const std::string& name, const FileDescriptor* d)
                {
                    return UniqueName(name, d->name(), InternalRuntimeOptions());
                }
                inline std::string UniqueName(const std::string& name, const Descriptor* d)
                {
                    return UniqueName(name, d->file(), InternalRuntimeOptions());
                }
                inline std::string UniqueName(const std::string& name, const EnumDescriptor* d)
                {
                    return UniqueName(name, d->file(), InternalRuntimeOptions());
                }
                inline std::string UniqueName(const std::string& name, const ServiceDescriptor* d)
                {
                    return UniqueName(name, d->file(), InternalRuntimeOptions());
                }

                // Return the qualified C++ name for a file level symbol.
+ std::string QualifiedFileLevelSymbol(const FileDescriptor* file, const std::string& name, const Options& options); + + // Escape C++ trigraphs by escaping question marks to \? + std::string EscapeTrigraphs(const std::string& to_escape); + + // Escaped function name to eliminate naming conflict. + std::string SafeFunctionName(const Descriptor* descriptor, const FieldDescriptor* field, const std::string& prefix); + + // Returns true if generated messages have public unknown fields accessors + inline bool PublicUnknownFieldsAccessors(const Descriptor* message) + { + return message->file()->syntax() != FileDescriptor::SYNTAX_PROTO3; + } + + // Returns the optimize mode for , respecting . + FileOptions_OptimizeMode GetOptimizeFor(const FileDescriptor* file, const Options& options); + + // Determines whether unknown fields will be stored in an UnknownFieldSet or + // a string. + inline bool UseUnknownFieldSet(const FileDescriptor* file, const Options& options) + { + return GetOptimizeFor(file, options) != FileOptions::LITE_RUNTIME; + } + + inline bool IsWeak(const FieldDescriptor* field, const Options& options) + { + if (field->options().weak()) + { + GOOGLE_CHECK(!options.opensource_runtime); + return true; + } + return false; + } + + bool IsStringInlined(const FieldDescriptor* descriptor, const Options& options); + + // For a string field, returns the effective ctype. If the actual ctype is + // not supported, returns the default of STRING. 
+ FieldOptions::CType EffectiveStringCType(const FieldDescriptor* field, const Options& options); + + inline bool IsCord(const FieldDescriptor* field, const Options& options) + { + return field->cpp_type() == FieldDescriptor::CPPTYPE_STRING && + EffectiveStringCType(field, options) == FieldOptions::CORD; + } + + inline bool IsString(const FieldDescriptor* field, const Options& options) + { + return field->cpp_type() == FieldDescriptor::CPPTYPE_STRING && + EffectiveStringCType(field, options) == FieldOptions::STRING; + } + + inline bool IsStringPiece(const FieldDescriptor* field, const Options& options) + { + return field->cpp_type() == FieldDescriptor::CPPTYPE_STRING && + EffectiveStringCType(field, options) == FieldOptions::STRING_PIECE; + } + + class MessageSCCAnalyzer; + + // Does the given FileDescriptor use lazy fields? + bool HasLazyFields(const FileDescriptor* file, const Options& options, MessageSCCAnalyzer* scc_analyzer); + + // Is the given field a supported lazy field? + bool IsLazy(const FieldDescriptor* field, const Options& options, MessageSCCAnalyzer* scc_analyzer); + + // Is this an explicit (non-profile driven) lazy field, as denoted by + // lazy/unverified_lazy in the descriptor? + inline bool IsExplicitLazy(const FieldDescriptor* field) + { + return field->options().lazy() || field->options().unverified_lazy(); + } + + bool IsEagerlyVerifiedLazy(const FieldDescriptor* field, const Options& options, MessageSCCAnalyzer* scc_analyzer); + + bool IsLazilyVerifiedLazy(const FieldDescriptor* field, const Options& options); + + // Is the given message being split (go/pdsplit)? + bool ShouldSplit(const Descriptor* desc, const Options& options); + + // Is the given field being split out? + bool ShouldSplit(const FieldDescriptor* field, const Options& options); + + inline bool IsFieldUsed(const FieldDescriptor* /* field */, const Options& /* options */) + { + return true; + } + + // Returns true if "field" is stripped. 
+ inline bool IsFieldStripped(const FieldDescriptor* /*field*/, const Options& /*options*/) + { + return false; + } + + // Does the file contain any definitions that need extension_set.h? + bool HasExtensionsOrExtendableMessage(const FileDescriptor* file); + + // Does the file have any repeated fields, necessitating the file to include + // repeated_field.h? This does not include repeated extensions, since those are + // all stored internally in an ExtensionSet, not a separate RepeatedField*. + bool HasRepeatedFields(const FileDescriptor* file); + + // Does the file have any string/bytes fields with ctype=STRING_PIECE? This + // does not include extensions, since ctype is ignored for extensions. + bool HasStringPieceFields(const FileDescriptor* file, const Options& options); + + // Does the file have any string/bytes fields with ctype=CORD? This does not + // include extensions, since ctype is ignored for extensions. + bool HasCordFields(const FileDescriptor* file, const Options& options); + + // Does the file have any map fields, necessitating the file to include + // map_field_inl.h and map.h. + bool HasMapFields(const FileDescriptor* file); + + // Does this file have any enum type definitions? + bool HasEnumDefinitions(const FileDescriptor* file); + + // Does this file have generated parsing, serialization, and other + // standard methods for which reflection-based fallback implementations exist? + inline bool HasGeneratedMethods(const FileDescriptor* file, const Options& options) + { + return GetOptimizeFor(file, options) != FileOptions::CODE_SIZE; + } + + // Do message classes in this file have descriptor and reflection methods? + inline bool HasDescriptorMethods(const FileDescriptor* file, const Options& options) + { + return GetOptimizeFor(file, options) != FileOptions::LITE_RUNTIME; + } + + // Should we generate generic services for this file? 
+ inline bool HasGenericServices(const FileDescriptor* file, const Options& options) + { + return file->service_count() > 0 && + GetOptimizeFor(file, options) != FileOptions::LITE_RUNTIME && + file->options().cc_generic_services(); + } + + inline bool IsProto2MessageSet(const Descriptor* descriptor, const Options& options) + { + return !options.opensource_runtime && + options.enforce_mode != EnforceOptimizeMode::kLiteRuntime && + !options.lite_implicit_weak_fields && + descriptor->options().message_set_wire_format() && + descriptor->full_name() == "google.protobuf.bridge.MessageSet"; + } + + inline bool IsMapEntryMessage(const Descriptor* descriptor) + { + return descriptor->options().map_entry(); + } + + // Returns true if the field's CPPTYPE is string or message. + bool IsStringOrMessage(const FieldDescriptor* field); + + std::string UnderscoresToCamelCase(const std::string& input, bool cap_next_letter); + + inline bool IsProto3(const FileDescriptor* file) + { + return file->syntax() == FileDescriptor::SYNTAX_PROTO3; + } + + inline bool HasHasbit(const FieldDescriptor* field) + { + // This predicate includes proto3 message fields only if they have "optional". + // Foo submsg1 = 1; // HasHasbit() == false + // optional Foo submsg2 = 2; // HasHasbit() == true + // This is slightly odd, as adding "optional" to a singular proto3 field does + // not change the semantics or API. However whenever any field in a message + // has a hasbit, it forces reflection to include hasbit offsets for *all* + // fields, even if almost all of them are set to -1 (no hasbit). So to avoid + // causing a sudden size regression for ~all proto3 messages, we give proto3 + // message fields a hasbit only if "optional" is present. If the user is + // explicitly writing "optional", it is likely they are writing it on + // primitive fields also. 
+ return (field->has_optional_keyword() || field->is_required()) && + !field->options().weak(); + } + + // Returns true if 'enum' semantics are such that unknown values are preserved + // in the enum field itself, rather than going to the UnknownFieldSet. + inline bool HasPreservingUnknownEnumSemantics(const FieldDescriptor* field) + { + return field->file()->syntax() == FileDescriptor::SYNTAX_PROTO3; + } + + inline bool IsCrossFileMessage(const FieldDescriptor* field) + { + return field->type() == FieldDescriptor::TYPE_MESSAGE && + field->message_type()->file() != field->file(); + } + + inline std::string MakeDefaultName(const FieldDescriptor* field) + { + return StrCat("_i_give_permission_to_break_this_code_default_", FieldName(field), "_"); + } + + // Semantically distinct from MakeDefaultName in that it gives the C++ code + // referencing a default field from the message scope, rather than just the + // variable name. + // For example, declarations of default variables should always use just + // MakeDefaultName to produce code like: + // Type _i_give_permission_to_break_this_code_default_field_; + // + // Code that references these should use MakeDefaultFieldName, in case the field + // exists at some nested level like: + // internal_container_._i_give_permission_to_break_this_code_default_field_; + inline std::string MakeDefaultFieldName(const FieldDescriptor* field) + { + return StrCat("Impl_::", MakeDefaultName(field)); + } + + inline std::string MakeVarintCachedSizeName(const FieldDescriptor* field) + { + return StrCat("_", FieldName(field), "_cached_byte_size_"); + } + + // Semantically distinct from MakeVarintCachedSizeName in that it gives the C++ + // code referencing the object from the message scope, rather than just the + // variable name. 
+ // For example, declarations of default variables should always use just + // MakeVarintCachedSizeName to produce code like: + // Type _field_cached_byte_size_; + // + // Code that references these variables should use + // MakeVarintCachedSizeFieldName, in case the field exists at some nested level + // like: + // internal_container_._field_cached_byte_size_; + inline std::string MakeVarintCachedSizeFieldName(const FieldDescriptor* field, bool split) + { + return StrCat("_impl_.", split ? "_split_->" : "", "_", FieldName(field), "_cached_byte_size_"); + } + + // Note: A lot of libraries detect Any protos based on Descriptor::full_name() + // while the two functions below use FileDescriptor::name(). In a sane world the + // two approaches should be equivalent. But if you are dealing with descriptors + // from untrusted sources, you might need to match semantics across libraries. + bool IsAnyMessage(const FileDescriptor* descriptor, const Options& options); + bool IsAnyMessage(const Descriptor* descriptor, const Options& options); + + bool IsWellKnownMessage(const FileDescriptor* descriptor); + + inline std::string IncludeGuard(const FileDescriptor* file, bool pb_h, const Options& options) + { + // If we are generating a .pb.h file and the proto_h option is enabled, then + // the .pb.h gets an extra suffix. + std::string filename_identifier = FilenameIdentifier( + file->name() + (pb_h && options.proto_h ? ".pb.h" : "") + ); + + if (IsWellKnownMessage(file)) + { + // For well-known messages we need third_party/protobuf and net/proto2 to + // have distinct include guards, because some source files include both and + // both need to be defined (the third_party copies will be in the + // google::protobuf_opensource namespace). + return MacroPrefix(options) + "_INCLUDED_" + filename_identifier; + } + else + { + // Ideally this case would use distinct include guards for opensource and + // google3 protos also. (The behavior of "first #included wins" is not + // ideal). 
But unfortunately some legacy code includes both and depends on + // the identical include guards to avoid compile errors. + // + // We should clean this up so that this case can be removed. + return "GOOGLE_PROTOBUF_INCLUDED_" + filename_identifier; + } + } + + // Returns the OptimizeMode for this file, furthermore it updates a status + // bool if has_opt_codesize_extension is non-null. If this status bool is true + // it means this file contains an extension that itself is defined as + // optimized_for = CODE_SIZE. + FileOptions_OptimizeMode GetOptimizeFor(const FileDescriptor* file, const Options& options, bool* has_opt_codesize_extension); + inline FileOptions_OptimizeMode GetOptimizeFor(const FileDescriptor* file, const Options& options) + { + return GetOptimizeFor(file, options, nullptr); + } + inline bool NeedsEagerDescriptorAssignment(const FileDescriptor* file, const Options& options) + { + bool has_opt_codesize_extension; + if (GetOptimizeFor(file, options, &has_opt_codesize_extension) == + FileOptions::CODE_SIZE && + has_opt_codesize_extension) + { + // If this filedescriptor contains an extension from another file which + // is optimized_for = CODE_SIZE. We need to be careful in the ordering so + // we eagerly build the descriptors in the dependencies before building + // the descriptors of this file. + return true; + } + else + { + // If we have a generated code based parser we never need eager + // initialization of descriptors of our deps. 
+ return false; + } + } + + // This orders the messages in a .pb.cc as it's outputted by file.cc + void FlattenMessagesInFile(const FileDescriptor* file, std::vector* result); + inline std::vector FlattenMessagesInFile( + const FileDescriptor* file + ) + { + std::vector result; + FlattenMessagesInFile(file, &result); + return result; + } + + template + void ForEachMessage(const Descriptor* descriptor, F&& func) + { + for (int i = 0; i < descriptor->nested_type_count(); i++) + ForEachMessage(descriptor->nested_type(i), std::forward(func)); + func(descriptor); + } + + template + void ForEachMessage(const FileDescriptor* descriptor, F&& func) + { + for (int i = 0; i < descriptor->message_type_count(); i++) + ForEachMessage(descriptor->message_type(i), std::forward(func)); + } + + bool HasWeakFields(const Descriptor* desc, const Options& options); + bool HasWeakFields(const FileDescriptor* desc, const Options& options); + + // Returns true if the "required" restriction check should be ignored for the + // given field. + inline static bool ShouldIgnoreRequiredFieldCheck(const FieldDescriptor* field, const Options& options) + { + // Do not check "required" for lazily verified lazy fields. + return IsLazilyVerifiedLazy(field, options); + } + + struct MessageAnalysis + { + bool is_recursive = false; + bool contains_cord = false; + bool contains_extension = false; + bool contains_required = false; + bool contains_weak = false; // Implicit weak as well. + }; + + // This class is used in FileGenerator, to ensure linear instead of + // quadratic performance, if we do this per message we would get O(V*(V+E)). + // Logically this is just only used in message.cc, but in the header for + // FileGenerator to help share it. 
+ class PROTOC_EXPORT MessageSCCAnalyzer + { + public: + explicit MessageSCCAnalyzer(const Options& options) : + options_(options) + { + } + + MessageAnalysis GetSCCAnalysis(const SCC* scc); + + bool HasRequiredFields(const Descriptor* descriptor) + { + MessageAnalysis result = GetSCCAnalysis(GetSCC(descriptor)); + return result.contains_required || result.contains_extension; + } + bool HasWeakField(const Descriptor* descriptor) + { + MessageAnalysis result = GetSCCAnalysis(GetSCC(descriptor)); + return result.contains_weak; + } + const SCC* GetSCC(const Descriptor* descriptor) + { + return analyzer_.GetSCC(descriptor); + } + + private: + struct DepsGenerator + { + std::vector operator()(const Descriptor* desc) const + { + std::vector deps; + for (int i = 0; i < desc->field_count(); i++) + { + if (desc->field(i)->message_type()) + { + deps.push_back(desc->field(i)->message_type()); + } + } + return deps; + } + }; + SCCAnalyzer analyzer_; + Options options_; + std::map analysis_cache_; + }; + + void ListAllFields(const Descriptor* d, std::vector* fields); + void ListAllFields(const FileDescriptor* d, std::vector* fields); + + template + void ForEachField(const Descriptor* d, T&& func) + { + for (int i = 0; i < d->nested_type_count(); i++) + { + ForEachField(d->nested_type(i), std::forward(func)); + } + for (int i = 0; i < d->extension_count(); i++) + { + func(d->extension(i)); + } + for (int i = 0; i < d->field_count(); i++) + { + func(d->field(i)); + } + } + + template + void ForEachField(const FileDescriptor* d, T&& func) + { + for (int i = 0; i < d->message_type_count(); i++) + { + ForEachField(d->message_type(i), std::forward(func)); + } + for (int i = 0; i < d->extension_count(); i++) + { + func(d->extension(i)); + } + } + + void ListAllTypesForServices(const FileDescriptor* fd, std::vector* types); + + // Indicates whether we should use implicit weak fields for this file. 
+ bool UsingImplicitWeakFields(const FileDescriptor* file, const Options& options); + + // Indicates whether to treat this field as implicitly weak. + bool IsImplicitWeakField(const FieldDescriptor* field, const Options& options, MessageSCCAnalyzer* scc_analyzer); + + inline bool HasSimpleBaseClass(const Descriptor* desc, const Options& options) + { + if (!HasDescriptorMethods(desc->file(), options)) + return false; + if (desc->extension_range_count() != 0) + return false; + if (desc->field_count() == 0) + return true; + // TODO(jorg): Support additional common message types with only one + // or two fields + return false; + } + + inline bool HasSimpleBaseClasses(const FileDescriptor* file, const Options& options) + { + bool v = false; + ForEachMessage(file, [&v, &options](const Descriptor* desc) + { v |= HasSimpleBaseClass(desc, options); }); + return v; + } + + inline std::string SimpleBaseClass(const Descriptor* desc, const Options& options) + { + if (!HasDescriptorMethods(desc->file(), options)) + return ""; + if (desc->extension_range_count() != 0) + return ""; + if (desc->field_count() == 0) + { + return "ZeroFieldsBase"; + } + // TODO(jorg): Support additional common message types with only one + // or two fields + return ""; + } + + // Returns true if this message has a _tracker_ field. + inline bool HasTracker(const Descriptor* desc, const Options& options) + { + return options.field_listener_options.inject_field_listener_events && + desc->file()->options().optimize_for() != + google::protobuf::FileOptions::LITE_RUNTIME; + } + + // Returns true if this message needs an Impl_ struct for it's data. + inline bool HasImplData(const Descriptor* desc, const Options& options) + { + return !HasSimpleBaseClass(desc, options); + } + + // Formatter is a functor class which acts as a closure around printer and + // the variable map. 
It's much like printer->Print except it supports both named + // variables that are substituted using a key value map and direct arguments. In + // the format string $1$, $2$, etc... are substituted for the first, second, ... + // direct argument respectively in the format call, it accepts both strings and + // integers. The implementation verifies all arguments are used and are "first" + // used in order of appearance in the argument list. For example, + // + // Format("return array[$1$];", 3) -> "return array[3];" + // Format("array[$2$] = $1$;", "Bla", 3) -> FATAL error (wrong order) + // Format("array[$1$] = $2$;", 3, "Bla") -> "array[3] = Bla;" + // + // The arguments can be used more than once like + // + // Format("array[$1$] = $2$; // Index = $1$", 3, "Bla") -> + // "array[3] = Bla; // Index = 3" + // + // If you use more arguments use the following style to help the reader, + // + // Format("int $1$() {\n" + // " array[$2$] = $3$;\n" + // " return $4$;" + // "}\n", + // funname, // 1 + // idx, // 2 + // varname, // 3 + // retval); // 4 + // + // but consider using named variables. Named variables like $foo$, with some + // identifier foo, are looked up in the map. One additional feature is that + // spaces are accepted between the '$' delimiters, $ foo$ will + // substitute to " bar" if foo stands for "bar", but in case it's empty + // will substitute to "". Hence, for example, + // + // Format(vars, "$dllexport $void fun();") -> "void fun();" + // "__declspec(export) void fun();" + // + // which is convenient to prevent double, leading or trailing spaces. 
+ class PROTOC_EXPORT Formatter + { + public: + explicit Formatter(io::Printer* printer) : + printer_(printer) + { + } + Formatter(io::Printer* printer, const std::map& vars) : + printer_(printer), + vars_(vars) + { + } + + template + void Set(const std::string& key, const T& value) + { + vars_[key] = ToString(value); + } + + void AddMap(const std::map& vars) + { + for (const auto& keyval : vars) + vars_[keyval.first] = keyval.second; + } + + template + void operator()(const char* format, const Args&... args) const + { + printer_->FormatInternal({ToString(args)...}, vars_, format); + } + + void Indent() const + { + printer_->Indent(); + } + void Outdent() const + { + printer_->Outdent(); + } + io::Printer* printer() const + { + return printer_; + } + + class PROTOC_EXPORT ScopedIndenter + { + public: + explicit ScopedIndenter(Formatter* format) : + format_(format) + { + format_->Indent(); + } + ~ScopedIndenter() + { + format_->Outdent(); + } + + private: + Formatter* format_; + }; + + PROTOBUF_NODISCARD ScopedIndenter ScopedIndent() + { + return ScopedIndenter(this); + } + template + PROTOBUF_NODISCARD ScopedIndenter ScopedIndent(const char* format, const Args&&... args) + { + (*this)(format, static_cast(args)...); + return ScopedIndenter(this); + } + + class PROTOC_EXPORT SaveState + { + public: + explicit SaveState(Formatter* format) : + format_(format), + vars_(format->vars_) + { + } + ~SaveState() + { + format_->vars_.swap(vars_); + } + + private: + Formatter* format_; + std::map vars_; + }; + + private: + io::Printer* printer_; + std::map vars_; + + // Convenience overloads to accept different types as arguments. 
+ static std::string ToString(const std::string& s) + { + return s; + } + template::value>::type> + static std::string ToString(I x) + { + return StrCat(x); + } + static std::string ToString(strings::Hex x) + { + return StrCat(x); + } + static std::string ToString(const FieldDescriptor* d) + { + return Payload(d); + } + static std::string ToString(const Descriptor* d) + { + return Payload(d); + } + static std::string ToString(const EnumDescriptor* d) + { + return Payload(d); + } + static std::string ToString(const EnumValueDescriptor* d) + { + return Payload(d); + } + static std::string ToString(const OneofDescriptor* d) + { + return Payload(d); + } + + template + static std::string Payload(const Descriptor* descriptor) + { + std::vector path; + descriptor->GetLocationPath(&path); + GeneratedCodeInfo::Annotation annotation; + for (int index : path) + { + annotation.add_path(index); + } + annotation.set_source_file(descriptor->file()->name()); + return annotation.SerializeAsString(); + } + }; + + template + void PrintFieldComment(const Formatter& format, const T* field) + { + // Print the field's (or oneof's) proto-syntax definition as a comment. + // We don't want to print group bodies so we cut off after the first + // line. 
+ DebugStringOptions options; + options.elide_group_body = true; + options.elide_oneof_body = true; + std::string def = field->DebugStringWithOptions(options); + format("// $1$\n", def.substr(0, def.find_first_of('\n'))); + } + + class PROTOC_EXPORT NamespaceOpener + { + public: + explicit NamespaceOpener(const Formatter& format) : + printer_(format.printer()) + { + } + NamespaceOpener(const std::string& name, const Formatter& format) : + NamespaceOpener(format) + { + ChangeTo(name); + } + ~NamespaceOpener() + { + ChangeTo(""); + } + + void ChangeTo(const std::string& name) + { + std::vector new_stack_ = + Split(name, "::", true); + size_t len = std::min(name_stack_.size(), new_stack_.size()); + size_t common_idx = 0; + while (common_idx < len) + { + if (name_stack_[common_idx] != new_stack_[common_idx]) + break; + common_idx++; + } + for (auto it = name_stack_.crbegin(); + it != name_stack_.crend() - common_idx; + ++it) + { + if (*it == "PROTOBUF_NAMESPACE_ID") + { + printer_->Print("PROTOBUF_NAMESPACE_CLOSE\n"); + } + else + { + printer_->Print("} // namespace $ns$\n", "ns", *it); + } + } + name_stack_.swap(new_stack_); + for (size_t i = common_idx; i < name_stack_.size(); ++i) + { + if (name_stack_[i] == "PROTOBUF_NAMESPACE_ID") + { + printer_->Print("PROTOBUF_NAMESPACE_OPEN\n"); + } + else + { + printer_->Print("namespace $ns$ {\n", "ns", name_stack_[i]); + } + } + } + + private: + io::Printer* printer_; + std::vector name_stack_; + }; + + enum class Utf8CheckMode + { + kStrict = 0, // Parsing will fail if non UTF-8 data is in string fields. + kVerify = 1, // Only log an error but parsing will succeed. + kNone = 2, // No UTF-8 check. 
+ }; + + Utf8CheckMode GetUtf8CheckMode(const FieldDescriptor* field, const Options& options); + + void GenerateUtf8CheckCodeForString(const FieldDescriptor* field, const Options& options, bool for_parse, const char* parameters, const Formatter& format); + + void GenerateUtf8CheckCodeForCord(const FieldDescriptor* field, const Options& options, bool for_parse, const char* parameters, const Formatter& format); + + template + struct FieldRangeImpl + { + struct Iterator + { + using iterator_category = std::forward_iterator_tag; + using value_type = const FieldDescriptor*; + using difference_type = int; + + value_type operator*() + { + return descriptor->field(idx); + } + + friend bool operator==(const Iterator& a, const Iterator& b) + { + GOOGLE_DCHECK(a.descriptor == b.descriptor); + return a.idx == b.idx; + } + friend bool operator!=(const Iterator& a, const Iterator& b) + { + return !(a == b); + } + + Iterator& operator++() + { + idx++; + return *this; + } + + int idx; + const T* descriptor; + }; + + Iterator begin() const + { + return {0, descriptor}; + } + Iterator end() const + { + return {descriptor->field_count(), descriptor}; + } + + const T* descriptor; + }; + + template + FieldRangeImpl FieldRange(const T* desc) + { + return {desc}; + } + + struct OneOfRangeImpl + { + struct Iterator + { + using iterator_category = std::forward_iterator_tag; + using value_type = const OneofDescriptor*; + using difference_type = int; + + value_type operator*() + { + return descriptor->oneof_decl(idx); + } + + friend bool operator==(const Iterator& a, const Iterator& b) + { + GOOGLE_DCHECK(a.descriptor == b.descriptor); + return a.idx == b.idx; + } + friend bool operator!=(const Iterator& a, const Iterator& b) + { + return !(a == b); + } + + Iterator& operator++() + { + idx++; + return *this; + } + + int idx; + const Descriptor* descriptor; + }; + + Iterator begin() const + { + return {0, descriptor}; + } + Iterator end() const + { + return 
{descriptor->real_oneof_decl_count(), descriptor}; + } + + const Descriptor* descriptor; + }; + + inline OneOfRangeImpl OneOfRange(const Descriptor* desc) + { + return {desc}; + } + + PROTOC_EXPORT std::string StripProto(const std::string& filename); + + bool EnableMessageOwnedArena(const Descriptor* desc, const Options& options); + + bool EnableMessageOwnedArenaTrial(const Descriptor* desc, const Options& options); + + bool ShouldVerify(const Descriptor* descriptor, const Options& options, MessageSCCAnalyzer* scc_analyzer); + bool ShouldVerify(const FileDescriptor* file, const Options& options, MessageSCCAnalyzer* scc_analyzer); + + // Indicates whether to use predefined verify methods for a given message. If a + // message is "simple" and needs no special verification per field (e.g. message + // field, repeated packed, UTF8 string, etc.), we can use either VerifySimple or + // VerifySimpleAlwaysCheckInt32 methods as all verification can be done based on + // the wire type. + // + // Otherwise, we need "custom" verify methods tailored to a message to pass + // which field needs a special verification; i.e. InternalVerify. + enum class VerifySimpleType + { + kSimpleInt32Never, // Use VerifySimple + kSimpleInt32Always, // Use VerifySimpleAlwaysCheckInt32 + kCustom, // Use InternalVerify and check only for int32 + kCustomInt32Never, // Use InternalVerify but never check for int32 + kCustomInt32Always, // Use InternalVerify and always check for int32 + }; + + // Returns VerifySimpleType if messages can be verified by predefined methods. 
+ VerifySimpleType ShouldVerifySimple(const Descriptor* descriptor); + + bool IsUtf8String(const FieldDescriptor* field); + + bool HasMessageFieldOrExtension(const Descriptor* desc); + + } // namespace cpp + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_CPP_HELPERS_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/names.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/names.h new file mode 100644 index 00000000..bb115abc --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/names.h @@ -0,0 +1,101 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CPP_NAMES_H__ +#define GOOGLE_PROTOBUF_COMPILER_CPP_NAMES_H__ + +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + + class Descriptor; + class EnumDescriptor; + class EnumValueDescriptor; + class FieldDescriptor; + + namespace compiler + { + namespace cpp + { + + // Returns the unqualified C++ name. + // + // For example, if you had: + // package foo.bar; + // message Baz { message Moo {} } + // Then the non-qualified version would be: + // Baz_Moo + std::string ClassName(const Descriptor* descriptor); + std::string ClassName(const EnumDescriptor* enum_descriptor); + + // Returns the fully qualified C++ name. + // + // For example, if you had: + // package foo.bar; + // message Baz { message Moo {} } + // Then the qualified ClassName for Moo would be: + // ::foo::bar::Baz_Moo + std::string QualifiedClassName(const Descriptor* d); + std::string QualifiedClassName(const EnumDescriptor* d); + std::string QualifiedExtensionName(const FieldDescriptor* d); + + // Get the (unqualified) name that should be used for this field in C++ code. + // The name is coerced to lower-case to emulate proto1 behavior. People + // should be using lowercase-with-underscores style for proto field names + // anyway, so normally this just returns field->name(). + std::string FieldName(const FieldDescriptor* field); + + // Requires that this field is in a oneof. 
Returns the (unqualified) case + // constant for this field. + std::string OneofCaseConstantName(const FieldDescriptor* field); + // Returns the quafilied case constant for this field. + std::string QualifiedOneofCaseConstantName(const FieldDescriptor* field); + + // Get the (unqualified) name that should be used for this enum value in C++ + // code. + std::string EnumValueName(const EnumValueDescriptor* enum_value); + + // Strips ".proto" or ".protodevel" from the end of a filename. + PROTOC_EXPORT std::string StripProto(const std::string& filename); + + } // namespace cpp + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_CPP_NAMES_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_doc_comment.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_doc_comment.h new file mode 100644 index 00000000..dd15ac12 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_doc_comment.h @@ -0,0 +1,54 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CSHARP_DOC_COMMENT_H__ +#define GOOGLE_PROTOBUF_COMPILER_CSHARP_DOC_COMMENT_H__ + +#include +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace csharp + { + void WriteMessageDocComment(io::Printer* printer, const Descriptor* message); + void WritePropertyDocComment(io::Printer* printer, const FieldDescriptor* field); + void WriteEnumDocComment(io::Printer* printer, const EnumDescriptor* enumDescriptor); + void WriteEnumValueDocComment(io::Printer* printer, const EnumValueDescriptor* value); + void WriteMethodDocComment(io::Printer* printer, const MethodDescriptor* method); + } // namespace csharp + } // namespace compiler + } // namespace protobuf +} // namespace google +#endif // GOOGLE_PROTOBUF_COMPILER_CSHARP_DOC_COMMENT_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_generator.h new file mode 100644 index 00000000..93d77835 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_generator.h @@ -0,0 +1,76 @@ +// Protocol Buffers - Google's data interchange format +// 
Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Generates C# code for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CSHARP_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_CSHARP_GENERATOR_H__ + +#include + +#include + +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace csharp + { + + // CodeGenerator implementation which generates a C# source file and + // header. 
If you create your own protocol compiler binary and you want + // it to support C# output, you can do so by registering an instance of this + // CodeGenerator with the CommandLineInterface in your main() function. + class PROTOC_EXPORT Generator : public CodeGenerator + { + public: + Generator(); + ~Generator(); + bool Generate( + const FileDescriptor* file, + const std::string& parameter, + GeneratorContext* generator_context, + std::string* error + ) const override; + uint64_t GetSupportedFeatures() const override; + }; + + } // namespace csharp + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_CSHARP_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_names.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_names.h new file mode 100644 index 00000000..719b25d4 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_names.h @@ -0,0 +1,109 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Provides a mechanism for mapping a descriptor to the +// fully-qualified name of the corresponding C# class. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CSHARP_NAMES_H__ +#define GOOGLE_PROTOBUF_COMPILER_CSHARP_NAMES_H__ + +#include +#include +#include + +#include + +namespace google +{ + namespace protobuf + { + + class Descriptor; + class EnumDescriptor; + class FileDescriptor; + class ServiceDescriptor; + + namespace compiler + { + namespace csharp + { + + // Requires: + // descriptor != NULL + // + // Returns: + // The namespace to use for given file descriptor. + std::string PROTOC_EXPORT GetFileNamespace(const FileDescriptor* descriptor); + + // Requires: + // descriptor != NULL + // + // Returns: + // The fully-qualified C# class name. + std::string PROTOC_EXPORT GetClassName(const Descriptor* descriptor); + + // Requires: + // descriptor != NULL + // + // Returns: + // The fully-qualified name of the C# class that provides + // access to the file descriptor. Proto compiler generates + // such class for each .proto file processed. 
+ std::string PROTOC_EXPORT + GetReflectionClassName(const FileDescriptor* descriptor); + + // Generates output file name for given file descriptor. If generate_directories + // is true, the output file will be put under directory corresponding to file's + // namespace. base_namespace can be used to strip some of the top level + // directories. E.g. for file with namespace "Bar.Foo" and base_namespace="Bar", + // the resulting file will be put under directory "Foo" (and not "Bar/Foo"). + // + // Requires: + // descriptor != NULL + // error != NULL + // + // Returns: + // The file name to use as output file for given file descriptor. In case + // of failure, this function will return empty string and error parameter + // will contain the error message. + std::string PROTOC_EXPORT GetOutputFile(const FileDescriptor* descriptor, const std::string file_extension, const bool generate_directories, const std::string base_namespace, std::string* error); + + } // namespace csharp + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_CSHARP_NAMES_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_options.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_options.h new file mode 100644 index 00000000..32c03dec --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_options.h @@ -0,0 +1,87 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_COMPILER_CSHARP_OPTIONS_H__ +#define GOOGLE_PROTOBUF_COMPILER_CSHARP_OPTIONS_H__ + +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace csharp + { + + // Generator options (used by csharp_generator.cc): + struct Options + { + Options() : + file_extension(".cs"), + base_namespace(""), + base_namespace_specified(false), + internal_access(false), + serializable(false) + { + } + // Extension of the generated file. Defaults to ".cs" + std::string file_extension; + // Base namespace to use to create directory hierarchy. Defaults to "". + // This option allows the simple creation of a conventional C# file layout, + // where directories are created relative to a project-specific base + // namespace. 
For example, in a project with a base namespace of PetShop, a + // proto of user.proto with a C# namespace of PetShop.Model.Shared would + // generate Model/Shared/User.cs underneath the specified --csharp_out + // directory. + // + // If no base namespace is specified, all files are generated in the + // --csharp_out directory, with no subdirectories created automatically. + std::string base_namespace; + // Whether the base namespace has been explicitly specified by the user. + // This is required as the base namespace can be explicitly set to the empty + // string, meaning "create a full directory hierarchy, starting from the first + // segment of the namespace." + bool base_namespace_specified; + // Whether the generated classes should have accessibility level of "internal". + // Defaults to false that generates "public" classes. + bool internal_access; + // Whether the generated classes should have a global::System.Serializable attribute added + // Defaults to false + bool serializable; + }; + + } // namespace csharp + } // namespace compiler + } // namespace protobuf +} // namespace google + +#endif // GOOGLE_PROTOBUF_COMPILER_CSHARP_OPTIONS_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/importer.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/importer.h new file mode 100644 index 00000000..faacae30 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/importer.h @@ -0,0 +1,346 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// This file is the public interface to the .proto file parser. + +#ifndef GOOGLE_PROTOBUF_COMPILER_IMPORTER_H__ +#define GOOGLE_PROTOBUF_COMPILER_IMPORTER_H__ + +#include +#include +#include +#include + +#include +#include +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + + namespace io + { + class ZeroCopyInputStream; + } + + namespace compiler + { + + // Defined in this file. + class Importer; + class MultiFileErrorCollector; + class SourceTree; + class DiskSourceTree; + + // TODO(kenton): Move all SourceTree stuff to a separate file? 
+ + // An implementation of DescriptorDatabase which loads files from a SourceTree + // and parses them. + // + // Note: This class is not thread-safe since it maintains a table of source + // code locations for error reporting. However, when a DescriptorPool wraps + // a DescriptorDatabase, it uses mutex locking to make sure only one method + // of the database is called at a time, even if the DescriptorPool is used + // from multiple threads. Therefore, there is only a problem if you create + // multiple DescriptorPools wrapping the same SourceTreeDescriptorDatabase + // and use them from multiple threads. + // + // Note: This class does not implement FindFileContainingSymbol() or + // FindFileContainingExtension(); these will always return false. + class PROTOBUF_EXPORT SourceTreeDescriptorDatabase : public DescriptorDatabase + { + public: + SourceTreeDescriptorDatabase(SourceTree* source_tree); + + // If non-NULL, fallback_database will be checked if a file doesn't exist in + // the specified source_tree. + SourceTreeDescriptorDatabase(SourceTree* source_tree, DescriptorDatabase* fallback_database); + ~SourceTreeDescriptorDatabase() override; + + // Instructs the SourceTreeDescriptorDatabase to report any parse errors + // to the given MultiFileErrorCollector. This should be called before + // parsing. error_collector must remain valid until either this method + // is called again or the SourceTreeDescriptorDatabase is destroyed. + void RecordErrorsTo(MultiFileErrorCollector* error_collector) + { + error_collector_ = error_collector; + } + + // Gets a DescriptorPool::ErrorCollector which records errors to the + // MultiFileErrorCollector specified with RecordErrorsTo(). This collector + // has the ability to determine exact line and column numbers of errors + // from the information given to it by the DescriptorPool. 
+ DescriptorPool::ErrorCollector* GetValidationErrorCollector() + { + using_validation_error_collector_ = true; + return &validation_error_collector_; + } + + // implements DescriptorDatabase ----------------------------------- + bool FindFileByName(const std::string& filename, FileDescriptorProto* output) override; + bool FindFileContainingSymbol(const std::string& symbol_name, FileDescriptorProto* output) override; + bool FindFileContainingExtension(const std::string& containing_type, int field_number, FileDescriptorProto* output) override; + + private: + class SingleFileErrorCollector; + + SourceTree* source_tree_; + DescriptorDatabase* fallback_database_; + MultiFileErrorCollector* error_collector_; + + class PROTOBUF_EXPORT ValidationErrorCollector : public DescriptorPool::ErrorCollector + { + public: + ValidationErrorCollector(SourceTreeDescriptorDatabase* owner); + ~ValidationErrorCollector() override; + + // implements ErrorCollector --------------------------------------- + void AddError(const std::string& filename, const std::string& element_name, const Message* descriptor, ErrorLocation location, const std::string& message) override; + + void AddWarning(const std::string& filename, const std::string& element_name, const Message* descriptor, ErrorLocation location, const std::string& message) override; + + private: + SourceTreeDescriptorDatabase* owner_; + }; + friend class ValidationErrorCollector; + + bool using_validation_error_collector_; + SourceLocationTable source_locations_; + ValidationErrorCollector validation_error_collector_; + }; + + // Simple interface for parsing .proto files. This wraps the process + // of opening the file, parsing it with a Parser, recursively parsing all its + // imports, and then cross-linking the results to produce a FileDescriptor. + // + // This is really just a thin wrapper around SourceTreeDescriptorDatabase. + // You may find that SourceTreeDescriptorDatabase is more flexible. 
+ // + // TODO(kenton): I feel like this class is not well-named. + class PROTOBUF_EXPORT Importer + { + public: + Importer(SourceTree* source_tree, MultiFileErrorCollector* error_collector); + ~Importer(); + + // Import the given file and build a FileDescriptor representing it. If + // the file is already in the DescriptorPool, the existing FileDescriptor + // will be returned. The FileDescriptor is property of the DescriptorPool, + // and will remain valid until it is destroyed. If any errors occur, they + // will be reported using the error collector and Import() will return NULL. + // + // A particular Importer object will only report errors for a particular + // file once. All future attempts to import the same file will return NULL + // without reporting any errors. The idea is that you might want to import + // a lot of files without seeing the same errors over and over again. If + // you want to see errors for the same files repeatedly, you can use a + // separate Importer object to import each one (but use the same + // DescriptorPool so that they can be cross-linked). + const FileDescriptor* Import(const std::string& filename); + + // The DescriptorPool in which all imported FileDescriptors and their + // contents are stored. + inline const DescriptorPool* pool() const + { + return &pool_; + } + + void AddUnusedImportTrackFile(const std::string& file_name, bool is_error = false); + void ClearUnusedImportTrackFiles(); + + private: + SourceTreeDescriptorDatabase database_; + DescriptorPool pool_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Importer); + }; + + // If the importer encounters problems while trying to import the proto files, + // it reports them to a MultiFileErrorCollector. + class PROTOBUF_EXPORT MultiFileErrorCollector + { + public: + inline MultiFileErrorCollector() + { + } + virtual ~MultiFileErrorCollector(); + + // Line and column numbers are zero-based. A line number of -1 indicates + // an error with the entire file (e.g. "not found"). 
+ virtual void AddError(const std::string& filename, int line, int column, const std::string& message) = 0; + + virtual void AddWarning(const std::string& /* filename */, int /* line */, int /* column */, const std::string& /* message */) + { + } + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MultiFileErrorCollector); + }; + + // Abstract interface which represents a directory tree containing proto files. + // Used by the default implementation of Importer to resolve import statements + // Most users will probably want to use the DiskSourceTree implementation, + // below. + class PROTOBUF_EXPORT SourceTree + { + public: + inline SourceTree() + { + } + virtual ~SourceTree(); + + // Open the given file and return a stream that reads it, or NULL if not + // found. The caller takes ownership of the returned object. The filename + // must be a path relative to the root of the source tree and must not + // contain "." or ".." components. + virtual io::ZeroCopyInputStream* Open(const std::string& filename) = 0; + + // If Open() returns NULL, calling this method immediately will return an + // description of the error. + // Subclasses should implement this method and return a meaningful value for + // better error reporting. + // TODO(xiaofeng): change this to a pure virtual function. + virtual std::string GetLastErrorMessage(); + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(SourceTree); + }; + + // An implementation of SourceTree which loads files from locations on disk. + // Multiple mappings can be set up to map locations in the DiskSourceTree to + // locations in the physical filesystem. + class PROTOBUF_EXPORT DiskSourceTree : public SourceTree + { + public: + DiskSourceTree(); + ~DiskSourceTree() override; + + // Map a path on disk to a location in the SourceTree. The path may be + // either a file or a directory. If it is a directory, the entire tree + // under it will be mapped to the given virtual location. 
To map a directory + // to the root of the source tree, pass an empty string for virtual_path. + // + // If multiple mapped paths apply when opening a file, they will be searched + // in order. For example, if you do: + // MapPath("bar", "foo/bar"); + // MapPath("", "baz"); + // and then you do: + // Open("bar/qux"); + // the DiskSourceTree will first try to open foo/bar/qux, then baz/bar/qux, + // returning the first one that opens successfully. + // + // disk_path may be an absolute path or relative to the current directory, + // just like a path you'd pass to open(). + void MapPath(const std::string& virtual_path, const std::string& disk_path); + + // Return type for DiskFileToVirtualFile(). + enum DiskFileToVirtualFileResult + { + SUCCESS, + SHADOWED, + CANNOT_OPEN, + NO_MAPPING + }; + + // Given a path to a file on disk, find a virtual path mapping to that + // file. The first mapping created with MapPath() whose disk_path contains + // the filename is used. However, that virtual path may not actually be + // usable to open the given file. Possible return values are: + // * SUCCESS: The mapping was found. *virtual_file is filled in so that + // calling Open(*virtual_file) will open the file named by disk_file. + // * SHADOWED: A mapping was found, but using Open() to open this virtual + // path will end up returning some different file. This is because some + // other mapping with a higher precedence also matches this virtual path + // and maps it to a different file that exists on disk. *virtual_file + // is filled in as it would be in the SUCCESS case. *shadowing_disk_file + // is filled in with the disk path of the file which would be opened if + // you were to call Open(*virtual_file). + // * CANNOT_OPEN: The mapping was found and was not shadowed, but the + // file specified cannot be opened. When this value is returned, + // errno will indicate the reason the file cannot be opened. 
*virtual_file + // will be set to the virtual path as in the SUCCESS case, even though + // it is not useful. + // * NO_MAPPING: Indicates that no mapping was found which contains this + // file. + DiskFileToVirtualFileResult DiskFileToVirtualFile( + const std::string& disk_file, std::string* virtual_file, std::string* shadowing_disk_file + ); + + // Given a virtual path, find the path to the file on disk. + // Return true and update disk_file with the on-disk path if the file exists. + // Return false and leave disk_file untouched if the file doesn't exist. + bool VirtualFileToDiskFile(const std::string& virtual_file, std::string* disk_file); + + // implements SourceTree ------------------------------------------- + io::ZeroCopyInputStream* Open(const std::string& filename) override; + + std::string GetLastErrorMessage() override; + + private: + struct Mapping + { + std::string virtual_path; + std::string disk_path; + + inline Mapping(const std::string& virtual_path_param, const std::string& disk_path_param) : + virtual_path(virtual_path_param), + disk_path(disk_path_param) + { + } + }; + std::vector mappings_; + std::string last_error_message_; + + // Like Open(), but returns the on-disk path in disk_file if disk_file is + // non-NULL and the file could be successfully opened. + io::ZeroCopyInputStream* OpenVirtualFile(const std::string& virtual_file, std::string* disk_file); + + // Like Open() but given the actual on-disk path. 
+ io::ZeroCopyInputStream* OpenDiskFile(const std::string& filename); + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DiskSourceTree); + }; + + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_IMPORTER_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/java/generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/generator.h new file mode 100644 index 00000000..c3a34669 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/generator.h @@ -0,0 +1,81 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Generates Java code for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_JAVA_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_JAVA_GENERATOR_H__ + +#include +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace java + { + + // CodeGenerator implementation which generates Java code. If you create your + // own protocol compiler binary and you want it to support Java output, you + // can do so by registering an instance of this CodeGenerator with the + // CommandLineInterface in your main() function. 
+ class PROTOC_EXPORT JavaGenerator : public CodeGenerator + { + public: + JavaGenerator(); + ~JavaGenerator() override; + + // implements CodeGenerator ---------------------------------------- + bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* context, std::string* error) const override; + + uint64_t GetSupportedFeatures() const override; + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(JavaGenerator); + }; + + } // namespace java + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_JAVA_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/java/java_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/java_generator.h new file mode 100644 index 00000000..294b1bde --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/java_generator.h @@ -0,0 +1,6 @@ +#ifndef GOOGLE_PROTOBUF_COMPILER_JAVA_JAVA_GENERATOR_H_ +#define GOOGLE_PROTOBUF_COMPILER_JAVA_JAVA_GENERATOR_H_ + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_JAVA_JAVA_GENERATOR_H_ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/java/kotlin_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/kotlin_generator.h new file mode 100644 index 00000000..ec2772c0 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/kotlin_generator.h @@ -0,0 +1,78 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Generates Kotlin code for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_JAVA_KOTLIN_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_JAVA_KOTLIN_GENERATOR_H__ + +#include + +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace java + { + + // CodeGenerator implementation which generates Kotlin code. If you create your + // own protocol compiler binary and you want it to support Kotlin output, you + // can do so by registering an instance of this CodeGenerator with the + // CommandLineInterface in your main() function. 
+ class PROTOC_EXPORT KotlinGenerator : public CodeGenerator + { + public: + KotlinGenerator(); + ~KotlinGenerator() override; + + // implements CodeGenerator ---------------------------------------- + bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* context, std::string* error) const override; + + uint64_t GetSupportedFeatures() const override; + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(KotlinGenerator); + }; + + } // namespace java + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_JAVA_KOTLIN_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/java/names.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/names.h new file mode 100644 index 00000000..cd28c8dc --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/java/names.h @@ -0,0 +1,104 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Provides a mechanism for mapping a descriptor to the +// fully-qualified name of the corresponding Java class. + +#ifndef GOOGLE_PROTOBUF_COMPILER_JAVA_NAMES_H__ +#define GOOGLE_PROTOBUF_COMPILER_JAVA_NAMES_H__ + +#include + +namespace google +{ + namespace protobuf + { + + class Descriptor; + class EnumDescriptor; + class FileDescriptor; + class FieldDescriptor; + class ServiceDescriptor; + + namespace compiler + { + namespace java + { + + // Requires: + // descriptor != NULL + // + // Returns: + // The fully-qualified Java class name. + std::string ClassName(const Descriptor* descriptor); + + // Requires: + // descriptor != NULL + // + // Returns: + // The fully-qualified Java class name. + std::string ClassName(const EnumDescriptor* descriptor); + + // Requires: + // descriptor != NULL + // + // Returns: + // The fully-qualified Java class name. + std::string ClassName(const FileDescriptor* descriptor); + + // Requires: + // descriptor != NULL + // + // Returns: + // The fully-qualified Java class name. 
+ std::string ClassName(const ServiceDescriptor* descriptor); + + // Requires: + // descriptor != NULL + // + // Returns: + // Java package name. + std::string FileJavaPackage(const FileDescriptor* descriptor); + + // Requires: + // descriptor != NULL + // Returns: + // Capitalized camel case name field name. + std::string CapitalizedFieldName(const FieldDescriptor* descriptor); + + } // namespace java + } // namespace compiler + } // namespace protobuf +} // namespace google +#endif // GOOGLE_PROTOBUF_COMPILER_JAVA_NAMES_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_generator.h new file mode 100644 index 00000000..683f0d6f --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_generator.h @@ -0,0 +1,82 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Generates ObjectiveC code for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_GENERATOR_H__ + +#include +#include +#include + +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace objectivec + { + + // CodeGenerator implementation which generates a ObjectiveC source file and + // header. If you create your own protocol compiler binary and you want it to + // support ObjectiveC output, you can do so by registering an instance of this + // CodeGenerator with the CommandLineInterface in your main() function. 
+ class PROTOC_EXPORT ObjectiveCGenerator : public CodeGenerator + { + public: + ObjectiveCGenerator(); + ~ObjectiveCGenerator(); + + ObjectiveCGenerator(const ObjectiveCGenerator&) = delete; + ObjectiveCGenerator& operator=(const ObjectiveCGenerator&) = delete; + + // implements CodeGenerator ---------------------------------------- + bool HasGenerateAll() const override; + bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* context, std::string* error) const override; + bool GenerateAll(const std::vector& files, const std::string& parameter, GeneratorContext* context, std::string* error) const override; + + uint64_t GetSupportedFeatures() const override + { + return FEATURE_PROTO3_OPTIONAL; + } + }; + + } // namespace objectivec + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_helpers.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_helpers.h new file mode 100644 index 00000000..f98fe6aa --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_helpers.h @@ -0,0 +1,368 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Helper functions for generating ObjectiveC code. + +#ifndef GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_HELPERS_H__ +#define GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_HELPERS_H__ + +#include +#include + +#include +#include +#include + +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace objectivec + { + + // Get/Set the path to a file to load for objc class prefix lookups. + std::string PROTOC_EXPORT GetPackageToPrefixMappingsPath(); + void PROTOC_EXPORT SetPackageToPrefixMappingsPath( + const std::string& file_path + ); + // Get/Set if the proto package should be used to make the default prefix for + // symbols. This will then impact most of the type naming apis below. It is done + // as a global to not break any other generator reusing the methods since they + // are exported. 
+ bool PROTOC_EXPORT UseProtoPackageAsDefaultPrefix(); + void PROTOC_EXPORT SetUseProtoPackageAsDefaultPrefix(bool on_or_off); + // Get/Set the path to a file to load as exceptions when + // `UseProtoPackageAsDefaultPrefix()` is `true`. An empty string means there + // should be no exceptions. + std::string PROTOC_EXPORT GetProtoPackagePrefixExceptionList(); + void PROTOC_EXPORT SetProtoPackagePrefixExceptionList( + const std::string& file_path + ); + + // Generator Prefix Validation Options (see objectivec_generator.cc for a + // description of each): + struct Options + { + Options(); + std::string expected_prefixes_path; + std::vector expected_prefixes_suppressions; + bool prefixes_must_be_registered; + bool require_prefixes; + }; + + // Escape C++ trigraphs by escaping question marks to "\?". + std::string PROTOC_EXPORT EscapeTrigraphs(const std::string& to_escape); + + // Remove white space from either end of a StringPiece. + void PROTOC_EXPORT TrimWhitespace(StringPiece* input); + + // Returns true if the name requires a ns_returns_not_retained attribute applied + // to it. + bool PROTOC_EXPORT IsRetainedName(const std::string& name); + + // Returns true if the name starts with "init" and will need to have special + // handling under ARC. + bool PROTOC_EXPORT IsInitName(const std::string& name); + + // Gets the objc_class_prefix or the prefix made from the proto package. + std::string PROTOC_EXPORT FileClassPrefix(const FileDescriptor* file); + + // Gets the path of the file we're going to generate (sans the .pb.h + // extension). The path will be dependent on the objectivec package + // declared in the proto package. + std::string PROTOC_EXPORT FilePath(const FileDescriptor* file); + + // Just like FilePath(), but without the directory part. + std::string PROTOC_EXPORT FilePathBasename(const FileDescriptor* file); + + // Gets the name of the root class we'll generate in the file. 
This class + // is not meant for external consumption, but instead contains helpers that + // the rest of the classes need + std::string PROTOC_EXPORT FileClassName(const FileDescriptor* file); + + // These return the fully-qualified class name corresponding to the given + // descriptor. + std::string PROTOC_EXPORT ClassName(const Descriptor* descriptor); + std::string PROTOC_EXPORT ClassName(const Descriptor* descriptor, std::string* out_suffix_added); + std::string PROTOC_EXPORT EnumName(const EnumDescriptor* descriptor); + + // Returns the fully-qualified name of the enum value corresponding to the + // the descriptor. + std::string PROTOC_EXPORT EnumValueName(const EnumValueDescriptor* descriptor); + + // Returns the name of the enum value corresponding to the descriptor. + std::string PROTOC_EXPORT EnumValueShortName(const EnumValueDescriptor* descriptor); + + // Reverse what an enum does. + std::string PROTOC_EXPORT UnCamelCaseEnumShortName(const std::string& name); + + // Returns the name to use for the extension (used as the method off the file's + // Root class). + std::string PROTOC_EXPORT ExtensionMethodName(const FieldDescriptor* descriptor); + + // Returns the transformed field name. + std::string PROTOC_EXPORT FieldName(const FieldDescriptor* field); + std::string PROTOC_EXPORT FieldNameCapitalized(const FieldDescriptor* field); + + // Returns the transformed oneof name. + std::string PROTOC_EXPORT OneofEnumName(const OneofDescriptor* descriptor); + std::string PROTOC_EXPORT OneofName(const OneofDescriptor* descriptor); + std::string PROTOC_EXPORT OneofNameCapitalized(const OneofDescriptor* descriptor); + + // Returns a symbol that can be used in C code to refer to an Objective C + // class without initializing the class. + std::string PROTOC_EXPORT ObjCClass(const std::string& class_name); + + // Declares an Objective C class without initializing the class so that it can + // be refrerred to by ObjCClass. 
+ std::string PROTOC_EXPORT ObjCClassDeclaration(const std::string& class_name); + + inline bool HasPreservingUnknownEnumSemantics(const FileDescriptor* file) + { + return file->syntax() == FileDescriptor::SYNTAX_PROTO3; + } + + inline bool IsMapEntryMessage(const Descriptor* descriptor) + { + return descriptor->options().map_entry(); + } + + // Reverse of the above. + std::string PROTOC_EXPORT UnCamelCaseFieldName(const std::string& name, const FieldDescriptor* field); + + enum ObjectiveCType + { + OBJECTIVECTYPE_INT32, + OBJECTIVECTYPE_UINT32, + OBJECTIVECTYPE_INT64, + OBJECTIVECTYPE_UINT64, + OBJECTIVECTYPE_FLOAT, + OBJECTIVECTYPE_DOUBLE, + OBJECTIVECTYPE_BOOLEAN, + OBJECTIVECTYPE_STRING, + OBJECTIVECTYPE_DATA, + OBJECTIVECTYPE_ENUM, + OBJECTIVECTYPE_MESSAGE + }; + + enum FlagType + { + FLAGTYPE_DESCRIPTOR_INITIALIZATION, + FLAGTYPE_EXTENSION, + FLAGTYPE_FIELD + }; + + template + std::string GetOptionalDeprecatedAttribute(const TDescriptor* descriptor, const FileDescriptor* file = NULL, bool preSpace = true, bool postNewline = false) + { + bool isDeprecated = descriptor->options().deprecated(); + // The file is only passed when checking Messages & Enums, so those types + // get tagged. At the moment, it doesn't seem to make sense to tag every + // field or enum value with when the file is deprecated. 
+ bool isFileLevelDeprecation = false; + if (!isDeprecated && file) + { + isFileLevelDeprecation = file->options().deprecated(); + isDeprecated = isFileLevelDeprecation; + } + if (isDeprecated) + { + std::string message; + const FileDescriptor* sourceFile = descriptor->file(); + if (isFileLevelDeprecation) + { + message = sourceFile->name() + " is deprecated."; + } + else + { + message = descriptor->full_name() + " is deprecated (see " + + sourceFile->name() + ")."; + } + + std::string result = std::string("GPB_DEPRECATED_MSG(\"") + message + "\")"; + if (preSpace) + { + result.insert(0, " "); + } + if (postNewline) + { + result.append("\n"); + } + return result; + } + else + { + return ""; + } + } + + std::string PROTOC_EXPORT GetCapitalizedType(const FieldDescriptor* field); + + ObjectiveCType PROTOC_EXPORT + GetObjectiveCType(FieldDescriptor::Type field_type); + + inline ObjectiveCType GetObjectiveCType(const FieldDescriptor* field) + { + return GetObjectiveCType(field->type()); + } + + bool PROTOC_EXPORT IsPrimitiveType(const FieldDescriptor* field); + bool PROTOC_EXPORT IsReferenceType(const FieldDescriptor* field); + + std::string PROTOC_EXPORT + GPBGenericValueFieldName(const FieldDescriptor* field); + std::string PROTOC_EXPORT DefaultValue(const FieldDescriptor* field); + bool PROTOC_EXPORT HasNonZeroDefaultValue(const FieldDescriptor* field); + + std::string PROTOC_EXPORT + BuildFlagsString(const FlagType type, const std::vector& strings); + + // Builds HeaderDoc/appledoc style comments out of the comments in the .proto + // file. + std::string PROTOC_EXPORT BuildCommentsString(const SourceLocation& location, bool prefer_single_line); + + // The name the commonly used by the library when built as a framework. + // This lines up to the name used in the CocoaPod. + extern PROTOC_EXPORT const char* const ProtobufLibraryFrameworkName; + // Returns the CPP symbol name to use as the gate for framework style imports + // for the given framework name to use. 
+ std::string PROTOC_EXPORT + ProtobufFrameworkImportSymbol(const std::string& framework_name); + + // Checks if the file is one of the proto's bundled with the library. + bool PROTOC_EXPORT + IsProtobufLibraryBundledProtoFile(const FileDescriptor* file); + + // Checks the prefix for the given files and outputs any warnings as needed. If + // there are flat out errors, then out_error is filled in with the first error + // and the result is false. + bool PROTOC_EXPORT ValidateObjCClassPrefixes( + const std::vector& files, + const Options& validation_options, + std::string* out_error + ); + // Same was the other ValidateObjCClassPrefixes() calls, but the options all + // come from the environment variables. + bool PROTOC_EXPORT ValidateObjCClassPrefixes( + const std::vector& files, std::string* out_error + ); + + // Generate decode data needed for ObjC's GPBDecodeTextFormatName() to transform + // the input into the expected output. + class PROTOC_EXPORT TextFormatDecodeData + { + public: + TextFormatDecodeData(); + ~TextFormatDecodeData(); + + TextFormatDecodeData(const TextFormatDecodeData&) = delete; + TextFormatDecodeData& operator=(const TextFormatDecodeData&) = delete; + + void AddString(int32_t key, const std::string& input_for_decode, const std::string& desired_output); + size_t num_entries() const + { + return entries_.size(); + } + std::string Data() const; + + static std::string DecodeDataForString(const std::string& input_for_decode, const std::string& desired_output); + + private: + typedef std::pair DataEntry; + std::vector entries_; + }; + + // Helper for parsing simple files. 
+ class PROTOC_EXPORT LineConsumer + { + public: + LineConsumer(); + virtual ~LineConsumer(); + virtual bool ConsumeLine(const StringPiece& line, std::string* out_error) = 0; + }; + + bool PROTOC_EXPORT ParseSimpleFile(const std::string& path, LineConsumer* line_consumer, std::string* out_error); + + bool PROTOC_EXPORT ParseSimpleStream(io::ZeroCopyInputStream& input_stream, const std::string& stream_name, LineConsumer* line_consumer, std::string* out_error); + + // Helper class for parsing framework import mappings and generating + // import statements. + class PROTOC_EXPORT ImportWriter + { + public: + ImportWriter(const std::string& generate_for_named_framework, const std::string& named_framework_to_proto_path_mappings_path, const std::string& runtime_import_prefix, bool include_wkt_imports); + ~ImportWriter(); + + void AddFile(const FileDescriptor* file, const std::string& header_extension); + void Print(io::Printer* printer) const; + + static void PrintRuntimeImports(io::Printer* printer, const std::vector& header_to_import, const std::string& runtime_import_prefix, bool default_cpp_symbol = false); + + private: + class ProtoFrameworkCollector : public LineConsumer + { + public: + ProtoFrameworkCollector(std::map* inout_proto_file_to_framework_name) : + map_(inout_proto_file_to_framework_name) + { + } + + virtual bool ConsumeLine(const StringPiece& line, std::string* out_error) override; + + private: + std::map* map_; + }; + + void ParseFrameworkMappings(); + + const std::string generate_for_named_framework_; + const std::string named_framework_to_proto_path_mappings_path_; + const std::string runtime_import_prefix_; + const bool include_wkt_imports_; + std::map proto_file_to_framework_name_; + bool need_to_parse_mapping_file_; + + std::vector protobuf_imports_; + std::vector other_framework_imports_; + std::vector other_imports_; + }; + + } // namespace objectivec + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif 
// GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_HELPERS_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/parser.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/parser.h new file mode 100644 index 00000000..f97e5c3e --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/parser.h @@ -0,0 +1,535 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// Implements parsing of .proto files to FileDescriptorProtos. + +#ifndef GOOGLE_PROTOBUF_COMPILER_PARSER_H__ +#define GOOGLE_PROTOBUF_COMPILER_PARSER_H__ + +#include +#include +#include +#include + +#include +#include +#include +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + + class Message; + + namespace compiler + { + + // Defined in this file. + class Parser; + class SourceLocationTable; + + // Implements parsing of protocol definitions (such as .proto files). + // + // Note that most users will be more interested in the Importer class. + // Parser is a lower-level class which simply converts a single .proto file + // to a FileDescriptorProto. It does not resolve import directives or perform + // many other kinds of validation needed to construct a complete + // FileDescriptor. + class PROTOBUF_EXPORT Parser + { + public: + Parser(); + ~Parser(); + + // Parse the entire input and construct a FileDescriptorProto representing + // it. Returns true if no errors occurred, false otherwise. + bool Parse(io::Tokenizer* input, FileDescriptorProto* file); + + // Optional features: + + // DEPRECATED: New code should use the SourceCodeInfo embedded in the + // FileDescriptorProto. + // + // Requests that locations of certain definitions be recorded to the given + // SourceLocationTable while parsing. This can be used to look up exact line + // and column numbers for errors reported by DescriptorPool during validation. + // Set to NULL (the default) to discard source location information. + void RecordSourceLocationsTo(SourceLocationTable* location_table) + { + source_location_table_ = location_table; + } + + // Requests that errors be recorded to the given ErrorCollector while + // parsing. Set to NULL (the default) to discard error messages. 
+ void RecordErrorsTo(io::ErrorCollector* error_collector) + { + error_collector_ = error_collector; + } + + // Returns the identifier used in the "syntax = " declaration, if one was + // seen during the last call to Parse(), or the empty string otherwise. + const std::string& GetSyntaxIdentifier() + { + return syntax_identifier_; + } + + // If set true, input files will be required to begin with a syntax + // identifier. Otherwise, files may omit this. If a syntax identifier + // is provided, it must be 'syntax = "proto2";' and must appear at the + // top of this file regardless of whether or not it was required. + void SetRequireSyntaxIdentifier(bool value) + { + require_syntax_identifier_ = value; + } + + // Call SetStopAfterSyntaxIdentifier(true) to tell the parser to stop + // parsing as soon as it has seen the syntax identifier, or lack thereof. + // This is useful for quickly identifying the syntax of the file without + // parsing the whole thing. If this is enabled, no error will be recorded + // if the syntax identifier is something other than "proto2" (since + // presumably the caller intends to deal with that), but other kinds of + // errors (e.g. parse errors) will still be reported. When this is enabled, + // you may pass a NULL FileDescriptorProto to Parse(). + void SetStopAfterSyntaxIdentifier(bool value) + { + stop_after_syntax_identifier_ = value; + } + + private: + class LocationRecorder; + struct MapField; + + // ================================================================= + // Error recovery helpers + + // Consume the rest of the current statement. This consumes tokens + // until it sees one of: + // ';' Consumes the token and returns. + // '{' Consumes the brace then calls SkipRestOfBlock(). + // '}' Returns without consuming. + // EOF Returns (can't consume). + // The Parser often calls SkipStatement() after encountering a syntax + // error. 
This allows it to go on parsing the following lines, allowing + // it to report more than just one error in the file. + void SkipStatement(); + + // Consume the rest of the current block, including nested blocks, + // ending after the closing '}' is encountered and consumed, or at EOF. + void SkipRestOfBlock(); + + // ----------------------------------------------------------------- + // Single-token consuming helpers + // + // These make parsing code more readable. + + // True if the current token is TYPE_END. + inline bool AtEnd(); + + // True if the next token matches the given text. + inline bool LookingAt(const char* text); + // True if the next token is of the given type. + inline bool LookingAtType(io::Tokenizer::TokenType token_type); + + // If the next token exactly matches the text given, consume it and return + // true. Otherwise, return false without logging an error. + bool TryConsume(const char* text); + + // These attempt to read some kind of token from the input. If successful, + // they return true. Otherwise they return false and add the given error + // to the error list. + + // Consume a token with the exact text given. + bool Consume(const char* text, const char* error); + // Same as above, but automatically generates the error "Expected \"text\".", + // where "text" is the expected token text. + bool Consume(const char* text); + // Consume a token of type IDENTIFIER and store its text in "output". + bool ConsumeIdentifier(std::string* output, const char* error); + // Consume an integer and store its value in "output". + bool ConsumeInteger(int* output, const char* error); + // Consume a signed integer and store its value in "output". + bool ConsumeSignedInteger(int* output, const char* error); + // Consume a 64-bit integer and store its value in "output". If the value + // is greater than max_value, an error will be reported. 
+ bool ConsumeInteger64(uint64_t max_value, uint64_t* output, const char* error); + // Consume a number and store its value in "output". This will accept + // tokens of either INTEGER or FLOAT type. + bool ConsumeNumber(double* output, const char* error); + // Consume a string literal and store its (unescaped) value in "output". + bool ConsumeString(std::string* output, const char* error); + + // Consume a token representing the end of the statement. Comments between + // this token and the next will be harvested for documentation. The given + // LocationRecorder should refer to the declaration that was just parsed; + // it will be populated with these comments. + // + // TODO(kenton): The LocationRecorder is const because historically locations + // have been passed around by const reference, for no particularly good + // reason. We should probably go through and change them all to mutable + // pointer to make this more intuitive. + bool TryConsumeEndOfDeclaration(const char* text, const LocationRecorder* location); + bool TryConsumeEndOfDeclarationFinishScope(const char* text, const LocationRecorder* location); + + bool ConsumeEndOfDeclaration(const char* text, const LocationRecorder* location); + + // ----------------------------------------------------------------- + // Error logging helpers + + // Invokes error_collector_->AddError(), if error_collector_ is not NULL. + void AddError(int line, int column, const std::string& error); + + // Invokes error_collector_->AddError() with the line and column number + // of the current token. + void AddError(const std::string& error); + + // Invokes error_collector_->AddWarning() with the line and column number + // of the current token. + void AddWarning(const std::string& warning); + + // Records a location in the SourceCodeInfo.location table (see + // descriptor.proto). 
We use RAII to ensure that the start and end locations + // are recorded -- the constructor records the start location and the + // destructor records the end location. Since the parser is + // recursive-descent, this works out beautifully. + class PROTOBUF_EXPORT LocationRecorder + { + public: + // Construct the file's "root" location. + LocationRecorder(Parser* parser); + + // Construct a location that represents a declaration nested within the + // given parent. E.g. a field's location is nested within the location + // for a message type. The parent's path will be copied, so you should + // call AddPath() only to add the path components leading from the parent + // to the child (as opposed to leading from the root to the child). + LocationRecorder(const LocationRecorder& parent); + + // Convenience constructors that call AddPath() one or two times. + LocationRecorder(const LocationRecorder& parent, int path1); + LocationRecorder(const LocationRecorder& parent, int path1, int path2); + + // Creates a recorder that generates locations into given source code info. + LocationRecorder(const LocationRecorder& parent, int path1, SourceCodeInfo* source_code_info); + + ~LocationRecorder(); + + // Add a path component. See SourceCodeInfo.Location.path in + // descriptor.proto. + void AddPath(int path_component); + + // By default the location is considered to start at the current token at + // the time the LocationRecorder is created. StartAt() sets the start + // location to the given token instead. + void StartAt(const io::Tokenizer::Token& token); + + // Start at the same location as some other LocationRecorder. + void StartAt(const LocationRecorder& other); + + // By default the location is considered to end at the previous token at + // the time the LocationRecorder is destroyed. EndAt() sets the end + // location to the given token instead. 
+ void EndAt(const io::Tokenizer::Token& token); + + // Records the start point of this location to the SourceLocationTable that + // was passed to RecordSourceLocationsTo(), if any. SourceLocationTable + // is an older way of keeping track of source locations which is still + // used in some places. + void RecordLegacyLocation( + const Message* descriptor, + DescriptorPool::ErrorCollector::ErrorLocation location + ); + void RecordLegacyImportLocation(const Message* descriptor, const std::string& name); + + // Returns the number of path components in the recorder's current location. + int CurrentPathSize() const; + + // Attaches leading and trailing comments to the location. The two strings + // will be swapped into place, so after this is called *leading and + // *trailing will be empty. + // + // TODO(kenton): See comment on TryConsumeEndOfDeclaration(), above, for + // why this is const. + void AttachComments(std::string* leading, std::string* trailing, std::vector* detached_comments) const; + + private: + Parser* parser_; + SourceCodeInfo* source_code_info_; + SourceCodeInfo::Location* location_; + + void Init(const LocationRecorder& parent, SourceCodeInfo* source_code_info); + }; + + // ================================================================= + // Parsers for various language constructs + + // Parses the "syntax = \"proto2\";" line at the top of the file. Returns + // false if it failed to parse or if the syntax identifier was not + // recognized. + bool ParseSyntaxIdentifier(const LocationRecorder& parent); + + // These methods parse various individual bits of code. They return + // false if they completely fail to parse the construct. In this case, + // it is probably necessary to skip the rest of the statement to recover. + // However, if these methods return true, it does NOT mean that there + // were no errors; only that there were no *syntax* errors. 
For instance, + // if a service method is defined using proper syntax but uses a primitive + // type as its input or output, ParseMethodField() still returns true + // and only reports the error by calling AddError(). In practice, this + // makes logic much simpler for the caller. + + // Parse a top-level message, enum, service, etc. + bool ParseTopLevelStatement(FileDescriptorProto* file, const LocationRecorder& root_location); + + // Parse various language high-level language construrcts. + bool ParseMessageDefinition(DescriptorProto* message, const LocationRecorder& message_location, const FileDescriptorProto* containing_file); + bool ParseEnumDefinition(EnumDescriptorProto* enum_type, const LocationRecorder& enum_location, const FileDescriptorProto* containing_file); + bool ParseServiceDefinition(ServiceDescriptorProto* service, const LocationRecorder& service_location, const FileDescriptorProto* containing_file); + bool ParsePackage(FileDescriptorProto* file, const LocationRecorder& root_location, const FileDescriptorProto* containing_file); + bool ParseImport(RepeatedPtrField* dependency, RepeatedField* public_dependency, RepeatedField* weak_dependency, const LocationRecorder& root_location, const FileDescriptorProto* containing_file); + + // These methods parse the contents of a message, enum, or service type and + // add them to the given object. They consume the entire block including + // the beginning and ending brace. + bool ParseMessageBlock(DescriptorProto* message, const LocationRecorder& message_location, const FileDescriptorProto* containing_file); + bool ParseEnumBlock(EnumDescriptorProto* enum_type, const LocationRecorder& enum_location, const FileDescriptorProto* containing_file); + bool ParseServiceBlock(ServiceDescriptorProto* service, const LocationRecorder& service_location, const FileDescriptorProto* containing_file); + + // Parse one statement within a message, enum, or service block, including + // final semicolon. 
+ bool ParseMessageStatement(DescriptorProto* message, const LocationRecorder& message_location, const FileDescriptorProto* containing_file); + bool ParseEnumStatement(EnumDescriptorProto* message, const LocationRecorder& enum_location, const FileDescriptorProto* containing_file); + bool ParseServiceStatement(ServiceDescriptorProto* message, const LocationRecorder& service_location, const FileDescriptorProto* containing_file); + + // Parse a field of a message. If the field is a group, its type will be + // added to "messages". + // + // parent_location and location_field_number_for_nested_type are needed when + // parsing groups -- we need to generate a nested message type within the + // parent and record its location accordingly. Since the parent could be + // either a FileDescriptorProto or a DescriptorProto, we must pass in the + // correct field number to use. + bool ParseMessageField(FieldDescriptorProto* field, RepeatedPtrField* messages, const LocationRecorder& parent_location, int location_field_number_for_nested_type, const LocationRecorder& field_location, const FileDescriptorProto* containing_file); + + // Like ParseMessageField() but expects the label has already been filled in + // by the caller. + bool ParseMessageFieldNoLabel(FieldDescriptorProto* field, RepeatedPtrField* messages, const LocationRecorder& parent_location, int location_field_number_for_nested_type, const LocationRecorder& field_location, const FileDescriptorProto* containing_file); + + bool ParseMapType(MapField* map_field, FieldDescriptorProto* field, LocationRecorder& type_name_location); + + // Parse an "extensions" declaration. + bool ParseExtensions(DescriptorProto* message, const LocationRecorder& extensions_location, const FileDescriptorProto* containing_file); + + // Parse a "reserved" declaration. 
+ bool ParseReserved(DescriptorProto* message, const LocationRecorder& message_location); + bool ParseReservedNames(DescriptorProto* message, const LocationRecorder& parent_location); + bool ParseReservedNumbers(DescriptorProto* message, const LocationRecorder& parent_location); + bool ParseReserved(EnumDescriptorProto* message, const LocationRecorder& message_location); + bool ParseReservedNames(EnumDescriptorProto* message, const LocationRecorder& parent_location); + bool ParseReservedNumbers(EnumDescriptorProto* message, const LocationRecorder& parent_location); + + // Parse an "extend" declaration. (See also comments for + // ParseMessageField().) + bool ParseExtend(RepeatedPtrField* extensions, RepeatedPtrField* messages, const LocationRecorder& parent_location, int location_field_number_for_nested_type, const LocationRecorder& extend_location, const FileDescriptorProto* containing_file); + + // Parse a "oneof" declaration. The caller is responsible for setting + // oneof_decl->label() since it will have had to parse the label before it + // knew it was parsing a oneof. + bool ParseOneof(OneofDescriptorProto* oneof_decl, DescriptorProto* containing_type, int oneof_index, const LocationRecorder& oneof_location, const LocationRecorder& containing_type_location, const FileDescriptorProto* containing_file); + + // Parse a single enum value within an enum block. + bool ParseEnumConstant(EnumValueDescriptorProto* enum_value, const LocationRecorder& enum_value_location, const FileDescriptorProto* containing_file); + + // Parse enum constant options, i.e. the list in square brackets at the end + // of the enum constant value definition. + bool ParseEnumConstantOptions(EnumValueDescriptorProto* value, const LocationRecorder& enum_value_location, const FileDescriptorProto* containing_file); + + // Parse a single method within a service definition. 
+ bool ParseServiceMethod(MethodDescriptorProto* method, const LocationRecorder& method_location, const FileDescriptorProto* containing_file); + + // Parse options of a single method or stream. + bool ParseMethodOptions(const LocationRecorder& parent_location, const FileDescriptorProto* containing_file, const int optionsFieldNumber, Message* mutable_options); + + // Parse "required", "optional", or "repeated" and fill in "label" + // with the value. Returns true if such a label is consumed. + bool ParseLabel(FieldDescriptorProto::Label* label, const LocationRecorder& field_location); + + // Parse a type name and fill in "type" (if it is a primitive) or + // "type_name" (if it is not) with the type parsed. + bool ParseType(FieldDescriptorProto::Type* type, std::string* type_name); + // Parse a user-defined type and fill in "type_name" with the name. + // If a primitive type is named, it is treated as an error. + bool ParseUserDefinedType(std::string* type_name); + + // Parses field options, i.e. the stuff in square brackets at the end + // of a field definition. Also parses default value. + bool ParseFieldOptions(FieldDescriptorProto* field, const LocationRecorder& field_location, const FileDescriptorProto* containing_file); + + // Parse the "default" option. This needs special handling because its + // type is the field's type. + bool ParseDefaultAssignment(FieldDescriptorProto* field, const LocationRecorder& field_location, const FileDescriptorProto* containing_file); + + bool ParseJsonName(FieldDescriptorProto* field, const LocationRecorder& field_location, const FileDescriptorProto* containing_file); + + enum OptionStyle + { + OPTION_ASSIGNMENT, // just "name = value" + OPTION_STATEMENT // "option name = value;" + }; + + // Parse a single option name/value pair, e.g. "ctype = CORD". The name + // identifies a field of the given Message, and the value of that field + // is set to the parsed value. 
+ bool ParseOption(Message* options, const LocationRecorder& options_location, const FileDescriptorProto* containing_file, OptionStyle style); + + // Parses a single part of a multipart option name. A multipart name consists + // of names separated by dots. Each name is either an identifier or a series + // of identifiers separated by dots and enclosed in parentheses. E.g., + // "foo.(bar.baz).moo". + bool ParseOptionNamePart(UninterpretedOption* uninterpreted_option, const LocationRecorder& part_location, const FileDescriptorProto* containing_file); + + // Parses a string surrounded by balanced braces. Strips off the outer + // braces and stores the enclosed string in *value. + // E.g., + // { foo } *value gets 'foo' + // { foo { bar: box } } *value gets 'foo { bar: box }' + // {} *value gets '' + // + // REQUIRES: LookingAt("{") + // When finished successfully, we are looking at the first token past + // the ending brace. + bool ParseUninterpretedBlock(std::string* value); + + struct MapField + { + // Whether the field is a map field. + bool is_map_field; + // The types of the key and value if they are primitive types. + FieldDescriptorProto::Type key_type; + FieldDescriptorProto::Type value_type; + // Or the type names string if the types are customized types. + std::string key_type_name; + std::string value_type_name; + + MapField() : + is_map_field(false) + { + } + }; + // Desugar the map syntax to generate a nested map entry message. + void GenerateMapEntry(const MapField& map_field, FieldDescriptorProto* field, RepeatedPtrField* messages); + + // Whether fields without label default to optional fields. 
+ bool DefaultToOptionalFields() const + { + return syntax_identifier_ == "proto3"; + } + + bool ValidateEnum(const EnumDescriptorProto* proto); + + // ================================================================= + + io::Tokenizer* input_; + io::ErrorCollector* error_collector_; + SourceCodeInfo* source_code_info_; + SourceLocationTable* source_location_table_; // legacy + bool had_errors_; + bool require_syntax_identifier_; + bool stop_after_syntax_identifier_; + std::string syntax_identifier_; + + // Leading doc comments for the next declaration. These are not complete + // yet; use ConsumeEndOfDeclaration() to get the complete comments. + std::string upcoming_doc_comments_; + + // Detached comments are not connected to any syntax entities. Elements in + // this vector are paragraphs of comments separated by empty lines. The + // detached comments will be put into the leading_detached_comments field for + // the next element (See SourceCodeInfo.Location in descriptor.proto), when + // ConsumeEndOfDeclaration() is called. + std::vector upcoming_detached_comments_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Parser); + }; + + // A table mapping (descriptor, ErrorLocation) pairs -- as reported by + // DescriptorPool when validating descriptors -- to line and column numbers + // within the original source code. + // + // This is semi-obsolete: FileDescriptorProto.source_code_info now contains + // far more complete information about source locations. However, as of this + // writing you still need to use SourceLocationTable when integrating with + // DescriptorPool. + class PROTOBUF_EXPORT SourceLocationTable + { + public: + SourceLocationTable(); + ~SourceLocationTable(); + + // Finds the precise location of the given error and fills in *line and + // *column with the line and column numbers. If not found, sets *line to + // -1 and *column to 0 (since line = -1 is used to mean "error has no exact + // location" in the ErrorCollector interface). 
Returns true if found, false + // otherwise. + bool Find(const Message* descriptor, DescriptorPool::ErrorCollector::ErrorLocation location, int* line, int* column) const; + bool FindImport(const Message* descriptor, const std::string& name, int* line, int* column) const; + + // Adds a location to the table. + void Add(const Message* descriptor, DescriptorPool::ErrorCollector::ErrorLocation location, int line, int column); + void AddImport(const Message* descriptor, const std::string& name, int line, int column); + + // Clears the contents of the table. + void Clear(); + + private: + typedef std::map< + std::pair, + std::pair> + LocationMap; + LocationMap location_map_; + std::map, std::pair> + import_location_map_; + }; + + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_PARSER_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/php/php_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/php/php_generator.h new file mode 100644 index 00000000..74d8088d --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/php/php_generator.h @@ -0,0 +1,98 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef GOOGLE_PROTOBUF_COMPILER_PHP_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_PHP_GENERATOR_H__ + +#include +#include + +#include + +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace php + { + + struct Options; + + class PROTOC_EXPORT Generator : public CodeGenerator + { + public: + virtual bool Generate( + const FileDescriptor* file, + const std::string& parameter, + GeneratorContext* generator_context, + std::string* error + ) const override; + + bool GenerateAll(const std::vector& files, const std::string& parameter, GeneratorContext* generator_context, std::string* error) const override; + + uint64_t GetSupportedFeatures() const override + { + return FEATURE_PROTO3_OPTIONAL; + } + + private: + bool Generate( + const FileDescriptor* file, + const Options& options, + GeneratorContext* generator_context, + std::string* error + ) const; + }; + + // To skip reserved keywords in php, some generated classname are prefixed. 
+ // Other code generators may need following API to figure out the actual + // classname. + PROTOC_EXPORT std::string GeneratedClassName(const Descriptor* desc); + PROTOC_EXPORT std::string GeneratedClassName(const EnumDescriptor* desc); + PROTOC_EXPORT std::string GeneratedClassName(const ServiceDescriptor* desc); + + inline bool IsWrapperType(const FieldDescriptor* descriptor) + { + return descriptor->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE && + descriptor->message_type()->file()->name() == "google/protobuf/wrappers.proto"; + } + + } // namespace php + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_PHP_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.h new file mode 100644 index 00000000..8571743e --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.h @@ -0,0 +1,95 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// Front-end for protoc code generator plugins written in C++. +// +// To implement a protoc plugin in C++, simply write an implementation of +// CodeGenerator, then create a main() function like: +// int main(int argc, char* argv[]) { +// MyCodeGenerator generator; +// return google::protobuf::compiler::PluginMain(argc, argv, &generator); +// } +// You must link your plugin against libprotobuf and libprotoc. +// +// The core part of PluginMain is to invoke the given CodeGenerator on a +// CodeGeneratorRequest to generate a CodeGeneratorResponse. This part is +// abstracted out and made into function GenerateCode so that it can be reused, +// for example, to implement a variant of PluginMain that does some +// preprocessing on the input CodeGeneratorRequest before feeding the request +// to the given code generator. +// +// To get protoc to use the plugin, do one of the following: +// * Place the plugin binary somewhere in the PATH and give it the name +// "protoc-gen-NAME" (replacing "NAME" with the name of your plugin). 
If you +// then invoke protoc with the parameter --NAME_out=OUT_DIR (again, replace +// "NAME" with your plugin's name), protoc will invoke your plugin to generate +// the output, which will be placed in OUT_DIR. +// * Place the plugin binary anywhere, with any name, and pass the --plugin +// parameter to protoc to direct it to your plugin like so: +// protoc --plugin=protoc-gen-NAME=path/to/mybinary --NAME_out=OUT_DIR +// On Windows, make sure to include the .exe suffix: +// protoc --plugin=protoc-gen-NAME=path/to/mybinary.exe --NAME_out=OUT_DIR + +#ifndef GOOGLE_PROTOBUF_COMPILER_PLUGIN_H__ +#define GOOGLE_PROTOBUF_COMPILER_PLUGIN_H__ + +#include + +// Must be included last. +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + + class CodeGenerator; // code_generator.h + class CodeGeneratorRequest; + class CodeGeneratorResponse; + + // Implements main() for a protoc plugin exposing the given code generator. + PROTOC_EXPORT int PluginMain(int argc, char* argv[], const CodeGenerator* generator); + + // Generates code using the given code generator. Returns true if the code + // generation is successful. If the code generation fails, error_msg may be + // populated to describe the failure cause. + bool GenerateCode(const CodeGeneratorRequest& request, const CodeGenerator& generator, CodeGeneratorResponse* response, std::string* error_msg); + + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_PLUGIN_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.pb.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.pb.h new file mode 100644 index 00000000..7d6bd399 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.pb.h @@ -0,0 +1,2362 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/protobuf/compiler/plugin.proto + +#ifndef GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fcompiler_2fplugin_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fcompiler_2fplugin_2eproto + +#include +#include + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +#include +#include +// @@protoc_insertion_point(includes) +#include +#define PROTOBUF_INTERNAL_EXPORT_google_2fprotobuf_2fcompiler_2fplugin_2eproto PROTOC_EXPORT +#ifdef major +#undef major +#endif +#ifdef minor +#undef minor +#endif +PROTOBUF_NAMESPACE_OPEN +namespace internal +{ + class AnyMetadata; +} // namespace internal +PROTOBUF_NAMESPACE_CLOSE + +// Internal implementation detail -- do not use these members. 
+struct PROTOC_EXPORT TableStruct_google_2fprotobuf_2fcompiler_2fplugin_2eproto +{ + static const uint32_t offsets[]; +}; +PROTOC_EXPORT extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_google_2fprotobuf_2fcompiler_2fplugin_2eproto; +PROTOBUF_NAMESPACE_OPEN +namespace compiler +{ + class CodeGeneratorRequest; + struct CodeGeneratorRequestDefaultTypeInternal; + PROTOC_EXPORT extern CodeGeneratorRequestDefaultTypeInternal _CodeGeneratorRequest_default_instance_; + class CodeGeneratorResponse; + struct CodeGeneratorResponseDefaultTypeInternal; + PROTOC_EXPORT extern CodeGeneratorResponseDefaultTypeInternal _CodeGeneratorResponse_default_instance_; + class CodeGeneratorResponse_File; + struct CodeGeneratorResponse_FileDefaultTypeInternal; + PROTOC_EXPORT extern CodeGeneratorResponse_FileDefaultTypeInternal _CodeGeneratorResponse_File_default_instance_; + class Version; + struct VersionDefaultTypeInternal; + PROTOC_EXPORT extern VersionDefaultTypeInternal _Version_default_instance_; +} // namespace compiler +PROTOBUF_NAMESPACE_CLOSE +PROTOBUF_NAMESPACE_OPEN +template<> +PROTOC_EXPORT ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorRequest* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorRequest>(Arena*); +template<> +PROTOC_EXPORT ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse>(Arena*); +template<> +PROTOC_EXPORT ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File>(Arena*); +template<> +PROTOC_EXPORT ::PROTOBUF_NAMESPACE_ID::compiler::Version* Arena::CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::compiler::Version>(Arena*); +PROTOBUF_NAMESPACE_CLOSE +PROTOBUF_NAMESPACE_OPEN +namespace compiler +{ + + enum CodeGeneratorResponse_Feature : int + { + CodeGeneratorResponse_Feature_FEATURE_NONE = 0, + 
CodeGeneratorResponse_Feature_FEATURE_PROTO3_OPTIONAL = 1 + }; + PROTOC_EXPORT bool CodeGeneratorResponse_Feature_IsValid(int value); + constexpr CodeGeneratorResponse_Feature CodeGeneratorResponse_Feature_Feature_MIN = CodeGeneratorResponse_Feature_FEATURE_NONE; + constexpr CodeGeneratorResponse_Feature CodeGeneratorResponse_Feature_Feature_MAX = CodeGeneratorResponse_Feature_FEATURE_PROTO3_OPTIONAL; + constexpr int CodeGeneratorResponse_Feature_Feature_ARRAYSIZE = CodeGeneratorResponse_Feature_Feature_MAX + 1; + + PROTOC_EXPORT const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* CodeGeneratorResponse_Feature_descriptor(); + template + inline const std::string& CodeGeneratorResponse_Feature_Name(T enum_t_value) + { + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function CodeGeneratorResponse_Feature_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + CodeGeneratorResponse_Feature_descriptor(), enum_t_value + ); + } + inline bool CodeGeneratorResponse_Feature_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, CodeGeneratorResponse_Feature* value + ) + { + return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( + CodeGeneratorResponse_Feature_descriptor(), name, value + ); + } + // =================================================================== + + class PROTOC_EXPORT Version final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.compiler.Version) */ + { + public: + inline Version() : + Version(nullptr) + { + } + ~Version() override; + explicit PROTOBUF_CONSTEXPR Version(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + Version(const Version& from); + Version(Version&& from) noexcept + : + Version() + { + *this = ::std::move(from); + } + + inline Version& operator=(const Version& from) + { + CopyFrom(from); + return *this; + } + inline Version& operator=(Version&& from) noexcept + { + if (this == &from) + return *this; + if 
(GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + return *this; + } + + inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const + { + return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); + } + inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() + { + return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return default_instance().GetMetadata().reflection; + } + static const Version& default_instance() + { + return *internal_default_instance(); + } + static inline const Version* internal_default_instance() + { + return reinterpret_cast( + &_Version_default_instance_ + ); + } + static constexpr int kIndexInFileMessages = + 0; + + friend void swap(Version& a, Version& b) + { + a.Swap(&b); + } + inline void Swap(Version* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(Version* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message 
---------------------------------------------- + + Version* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const Version& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const Version& from) + { + Version::MergeImpl(*this, from); + } + + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return _impl_._cached_size_.Get(); + } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(Version* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.compiler.Version"; + } + + protected: + explicit Version(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + + public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int + { + kSuffixFieldNumber = 4, + kMajorFieldNumber = 1, + kMinorFieldNumber = 2, + kPatchFieldNumber = 3, + }; + // optional string suffix = 4; + bool has_suffix() const; + 
+ private: + bool _internal_has_suffix() const; + + public: + void clear_suffix(); + const std::string& suffix() const; + template + void set_suffix(ArgT0&& arg0, ArgT... args); + std::string* mutable_suffix(); + PROTOBUF_NODISCARD std::string* release_suffix(); + void set_allocated_suffix(std::string* suffix); + + private: + const std::string& _internal_suffix() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_suffix(const std::string& value); + std::string* _internal_mutable_suffix(); + + public: + // optional int32 major = 1; + bool has_major() const; + + private: + bool _internal_has_major() const; + + public: + void clear_major(); + int32_t major() const; + void set_major(int32_t value); + + private: + int32_t _internal_major() const; + void _internal_set_major(int32_t value); + + public: + // optional int32 minor = 2; + bool has_minor() const; + + private: + bool _internal_has_minor() const; + + public: + void clear_minor(); + int32_t minor() const; + void set_minor(int32_t value); + + private: + int32_t _internal_minor() const; + void _internal_set_minor(int32_t value); + + public: + // optional int32 patch = 3; + bool has_patch() const; + + private: + bool _internal_has_patch() const; + + public: + void clear_patch(); + int32_t patch() const; + void set_patch(int32_t value); + + private: + int32_t _internal_patch() const; + void _internal_set_patch(int32_t value); + + public: + // @@protoc_insertion_point(class_scope:google.protobuf.compiler.Version) + + private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr suffix_; + int32_t major_; + int32_t minor_; + int32_t patch_; + }; + union + { + Impl_ _impl_; + }; + friend struct 
::TableStruct_google_2fprotobuf_2fcompiler_2fplugin_2eproto; + }; + // ------------------------------------------------------------------- + + class PROTOC_EXPORT CodeGeneratorRequest final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.compiler.CodeGeneratorRequest) */ + { + public: + inline CodeGeneratorRequest() : + CodeGeneratorRequest(nullptr) + { + } + ~CodeGeneratorRequest() override; + explicit PROTOBUF_CONSTEXPR CodeGeneratorRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CodeGeneratorRequest(const CodeGeneratorRequest& from); + CodeGeneratorRequest(CodeGeneratorRequest&& from) noexcept + : + CodeGeneratorRequest() + { + *this = ::std::move(from); + } + + inline CodeGeneratorRequest& operator=(const CodeGeneratorRequest& from) + { + CopyFrom(from); + return *this; + } + inline CodeGeneratorRequest& operator=(CodeGeneratorRequest&& from) noexcept + { + if (this == &from) + return *this; + if (GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + return *this; + } + + inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const + { + return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); + } + inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() + { + return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return 
default_instance().GetMetadata().reflection; + } + static const CodeGeneratorRequest& default_instance() + { + return *internal_default_instance(); + } + static inline const CodeGeneratorRequest* internal_default_instance() + { + return reinterpret_cast( + &_CodeGeneratorRequest_default_instance_ + ); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(CodeGeneratorRequest& a, CodeGeneratorRequest& b) + { + a.Swap(&b); + } + inline void Swap(CodeGeneratorRequest* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CodeGeneratorRequest* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CodeGeneratorRequest* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CodeGeneratorRequest& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const CodeGeneratorRequest& from) + { + CodeGeneratorRequest::MergeImpl(*this, from); + } + + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, 
::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return _impl_._cached_size_.Get(); + } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CodeGeneratorRequest* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.compiler.CodeGeneratorRequest"; + } + + protected: + explicit CodeGeneratorRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + + public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int + { + kFileToGenerateFieldNumber = 1, + kProtoFileFieldNumber = 15, + kParameterFieldNumber = 2, + kCompilerVersionFieldNumber = 3, + }; + // repeated string file_to_generate = 1; + int file_to_generate_size() const; + + private: + int _internal_file_to_generate_size() const; + + public: + void clear_file_to_generate(); + const std::string& file_to_generate(int index) const; + std::string* mutable_file_to_generate(int index); + void set_file_to_generate(int index, const std::string& value); + void set_file_to_generate(int index, std::string&& value); + void set_file_to_generate(int index, const char* value); + void set_file_to_generate(int index, const char* value, size_t size); + std::string* add_file_to_generate(); + void add_file_to_generate(const std::string& value); + void add_file_to_generate(std::string&& value); + void add_file_to_generate(const char* value); + void add_file_to_generate(const char* value, size_t size); + const 
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& file_to_generate() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* mutable_file_to_generate(); + + private: + const std::string& _internal_file_to_generate(int index) const; + std::string* _internal_add_file_to_generate(); + + public: + // repeated .google.protobuf.FileDescriptorProto proto_file = 15; + int proto_file_size() const; + + private: + int _internal_proto_file_size() const; + + public: + void clear_proto_file(); + ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto* mutable_proto_file(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::FileDescriptorProto>* + mutable_proto_file(); + + private: + const ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto& _internal_proto_file(int index) const; + ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto* _internal_add_proto_file(); + + public: + const ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto& proto_file(int index) const; + ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto* add_proto_file(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::FileDescriptorProto>& + proto_file() const; + + // optional string parameter = 2; + bool has_parameter() const; + + private: + bool _internal_has_parameter() const; + + public: + void clear_parameter(); + const std::string& parameter() const; + template + void set_parameter(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_parameter(); + PROTOBUF_NODISCARD std::string* release_parameter(); + void set_allocated_parameter(std::string* parameter); + + private: + const std::string& _internal_parameter() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_parameter(const std::string& value); + std::string* _internal_mutable_parameter(); + + public: + // optional .google.protobuf.compiler.Version compiler_version = 3; + bool has_compiler_version() const; + + private: + bool _internal_has_compiler_version() const; + + public: + void clear_compiler_version(); + const ::PROTOBUF_NAMESPACE_ID::compiler::Version& compiler_version() const; + PROTOBUF_NODISCARD ::PROTOBUF_NAMESPACE_ID::compiler::Version* release_compiler_version(); + ::PROTOBUF_NAMESPACE_ID::compiler::Version* mutable_compiler_version(); + void set_allocated_compiler_version(::PROTOBUF_NAMESPACE_ID::compiler::Version* compiler_version); + + private: + const ::PROTOBUF_NAMESPACE_ID::compiler::Version& _internal_compiler_version() const; + ::PROTOBUF_NAMESPACE_ID::compiler::Version* _internal_mutable_compiler_version(); + + public: + void unsafe_arena_set_allocated_compiler_version( + ::PROTOBUF_NAMESPACE_ID::compiler::Version* compiler_version + ); + ::PROTOBUF_NAMESPACE_ID::compiler::Version* unsafe_arena_release_compiler_version(); + + // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest) + + private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField file_to_generate_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::FileDescriptorProto> proto_file_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr parameter_; + 
::PROTOBUF_NAMESPACE_ID::compiler::Version* compiler_version_; + }; + union + { + Impl_ _impl_; + }; + friend struct ::TableStruct_google_2fprotobuf_2fcompiler_2fplugin_2eproto; + }; + // ------------------------------------------------------------------- + + class PROTOC_EXPORT CodeGeneratorResponse_File final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.compiler.CodeGeneratorResponse.File) */ + { + public: + inline CodeGeneratorResponse_File() : + CodeGeneratorResponse_File(nullptr) + { + } + ~CodeGeneratorResponse_File() override; + explicit PROTOBUF_CONSTEXPR CodeGeneratorResponse_File(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CodeGeneratorResponse_File(const CodeGeneratorResponse_File& from); + CodeGeneratorResponse_File(CodeGeneratorResponse_File&& from) noexcept + : + CodeGeneratorResponse_File() + { + *this = ::std::move(from); + } + + inline CodeGeneratorResponse_File& operator=(const CodeGeneratorResponse_File& from) + { + CopyFrom(from); + return *this; + } + inline CodeGeneratorResponse_File& operator=(CodeGeneratorResponse_File&& from) noexcept + { + if (this == &from) + return *this; + if (GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + return *this; + } + + inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const + { + return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); + } + inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() + { + return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const 
::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return default_instance().GetMetadata().reflection; + } + static const CodeGeneratorResponse_File& default_instance() + { + return *internal_default_instance(); + } + static inline const CodeGeneratorResponse_File* internal_default_instance() + { + return reinterpret_cast( + &_CodeGeneratorResponse_File_default_instance_ + ); + } + static constexpr int kIndexInFileMessages = + 2; + + friend void swap(CodeGeneratorResponse_File& a, CodeGeneratorResponse_File& b) + { + a.Swap(&b); + } + inline void Swap(CodeGeneratorResponse_File* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CodeGeneratorResponse_File* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CodeGeneratorResponse_File* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CodeGeneratorResponse_File& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const CodeGeneratorResponse_File& from) + { + CodeGeneratorResponse_File::MergeImpl(*this, from); + } + + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() 
final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return _impl_._cached_size_.Get(); + } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CodeGeneratorResponse_File* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.compiler.CodeGeneratorResponse.File"; + } + + protected: + explicit CodeGeneratorResponse_File(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + + public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int + { + kNameFieldNumber = 1, + kInsertionPointFieldNumber = 2, + kContentFieldNumber = 15, + kGeneratedCodeInfoFieldNumber = 16, + }; + // optional string name = 1; + bool has_name() const; + + private: + bool _internal_has_name() const; + + public: + void clear_name(); + const std::string& name() const; + template + void set_name(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_name(); + PROTOBUF_NODISCARD std::string* release_name(); + void set_allocated_name(std::string* name); + + private: + const std::string& _internal_name() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_name(const std::string& value); + std::string* _internal_mutable_name(); + + public: + // optional string insertion_point = 2; + bool has_insertion_point() const; + + private: + bool _internal_has_insertion_point() const; + + public: + void clear_insertion_point(); + const std::string& insertion_point() const; + template + void set_insertion_point(ArgT0&& arg0, ArgT... args); + std::string* mutable_insertion_point(); + PROTOBUF_NODISCARD std::string* release_insertion_point(); + void set_allocated_insertion_point(std::string* insertion_point); + + private: + const std::string& _internal_insertion_point() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_insertion_point(const std::string& value); + std::string* _internal_mutable_insertion_point(); + + public: + // optional string content = 15; + bool has_content() const; + + private: + bool _internal_has_content() const; + + public: + void clear_content(); + const std::string& content() const; + template + void set_content(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_content(); + PROTOBUF_NODISCARD std::string* release_content(); + void set_allocated_content(std::string* content); + + private: + const std::string& _internal_content() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_content(const std::string& value); + std::string* _internal_mutable_content(); + + public: + // optional .google.protobuf.GeneratedCodeInfo generated_code_info = 16; + bool has_generated_code_info() const; + + private: + bool _internal_has_generated_code_info() const; + + public: + void clear_generated_code_info(); + const ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo& generated_code_info() const; + PROTOBUF_NODISCARD ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* release_generated_code_info(); + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* mutable_generated_code_info(); + void set_allocated_generated_code_info(::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* generated_code_info); + + private: + const ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo& _internal_generated_code_info() const; + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* _internal_mutable_generated_code_info(); + + public: + void unsafe_arena_set_allocated_generated_code_info( + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* generated_code_info + ); + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* unsafe_arena_release_generated_code_info(); + + // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File) + + private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr insertion_point_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr content_; + 
::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* generated_code_info_; + }; + union + { + Impl_ _impl_; + }; + friend struct ::TableStruct_google_2fprotobuf_2fcompiler_2fplugin_2eproto; + }; + // ------------------------------------------------------------------- + + class PROTOC_EXPORT CodeGeneratorResponse final : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:google.protobuf.compiler.CodeGeneratorResponse) */ + { + public: + inline CodeGeneratorResponse() : + CodeGeneratorResponse(nullptr) + { + } + ~CodeGeneratorResponse() override; + explicit PROTOBUF_CONSTEXPR CodeGeneratorResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + CodeGeneratorResponse(const CodeGeneratorResponse& from); + CodeGeneratorResponse(CodeGeneratorResponse&& from) noexcept + : + CodeGeneratorResponse() + { + *this = ::std::move(from); + } + + inline CodeGeneratorResponse& operator=(const CodeGeneratorResponse& from) + { + CopyFrom(from); + return *this; + } + inline CodeGeneratorResponse& operator=(CodeGeneratorResponse&& from) noexcept + { + if (this == &from) + return *this; + if (GetOwningArena() == from.GetOwningArena() +#ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr +#endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) + { + InternalSwap(&from); + } + else + { + CopyFrom(from); + } + return *this; + } + + inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const + { + return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); + } + inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() + { + return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() + { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() + { + return 
default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() + { + return default_instance().GetMetadata().reflection; + } + static const CodeGeneratorResponse& default_instance() + { + return *internal_default_instance(); + } + static inline const CodeGeneratorResponse* internal_default_instance() + { + return reinterpret_cast( + &_CodeGeneratorResponse_default_instance_ + ); + } + static constexpr int kIndexInFileMessages = + 3; + + friend void swap(CodeGeneratorResponse& a, CodeGeneratorResponse& b) + { + a.Swap(&b); + } + inline void Swap(CodeGeneratorResponse* other) + { + if (other == this) + return; +#ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) + { +#else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) + { +#endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } + else + { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CodeGeneratorResponse* other) + { + if (other == this) + return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CodeGeneratorResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final + { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom; + void CopyFrom(const CodeGeneratorResponse& from); + using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom; + void MergeFrom(const CodeGeneratorResponse& from) + { + CodeGeneratorResponse::MergeImpl(*this, from); + } + + private: + static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg); + + public: + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + const char* _InternalParse(const 
char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const final; + int GetCachedSize() const final + { + return _impl_._cached_size_.Get(); + } + + private: + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); + void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(CodeGeneratorResponse* other); + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() + { + return "google.protobuf.compiler.CodeGeneratorResponse"; + } + + protected: + explicit CodeGeneratorResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); + + public: + static const ClassData _class_data_; + const ::PROTOBUF_NAMESPACE_ID::Message::ClassData* GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + typedef CodeGeneratorResponse_File File; + + typedef CodeGeneratorResponse_Feature Feature; + static constexpr Feature FEATURE_NONE = + CodeGeneratorResponse_Feature_FEATURE_NONE; + static constexpr Feature FEATURE_PROTO3_OPTIONAL = + CodeGeneratorResponse_Feature_FEATURE_PROTO3_OPTIONAL; + static inline bool Feature_IsValid(int value) + { + return CodeGeneratorResponse_Feature_IsValid(value); + } + static constexpr Feature Feature_MIN = + CodeGeneratorResponse_Feature_Feature_MIN; + static constexpr Feature Feature_MAX = + CodeGeneratorResponse_Feature_Feature_MAX; + static constexpr int Feature_ARRAYSIZE = + CodeGeneratorResponse_Feature_Feature_ARRAYSIZE; + static inline const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* + Feature_descriptor() + { + return CodeGeneratorResponse_Feature_descriptor(); + } + template + static inline const std::string& Feature_Name(T enum_t_value) + { + 
static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function Feature_Name."); + return CodeGeneratorResponse_Feature_Name(enum_t_value); + } + static inline bool Feature_Parse(::PROTOBUF_NAMESPACE_ID::ConstStringParam name, Feature* value) + { + return CodeGeneratorResponse_Feature_Parse(name, value); + } + + // accessors ------------------------------------------------------- + + enum : int + { + kFileFieldNumber = 15, + kErrorFieldNumber = 1, + kSupportedFeaturesFieldNumber = 2, + }; + // repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15; + int file_size() const; + + private: + int _internal_file_size() const; + + public: + void clear_file(); + ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* mutable_file(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File>* + mutable_file(); + + private: + const ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File& _internal_file(int index) const; + ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* _internal_add_file(); + + public: + const ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File& file(int index) const; + ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* add_file(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File>& + file() const; + + // optional string error = 1; + bool has_error() const; + + private: + bool _internal_has_error() const; + + public: + void clear_error(); + const std::string& error() const; + template + void set_error(ArgT0&& arg0, ArgT... 
args); + std::string* mutable_error(); + PROTOBUF_NODISCARD std::string* release_error(); + void set_allocated_error(std::string* error); + + private: + const std::string& _internal_error() const; + inline PROTOBUF_ALWAYS_INLINE void _internal_set_error(const std::string& value); + std::string* _internal_mutable_error(); + + public: + // optional uint64 supported_features = 2; + bool has_supported_features() const; + + private: + bool _internal_has_supported_features() const; + + public: + void clear_supported_features(); + uint64_t supported_features() const; + void set_supported_features(uint64_t value); + + private: + uint64_t _internal_supported_features() const; + void _internal_set_supported_features(uint64_t value); + + public: + // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse) + + private: + class _Internal; + + template + friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + struct Impl_ + { + ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File> file_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr error_; + uint64_t supported_features_; + }; + union + { + Impl_ _impl_; + }; + friend struct ::TableStruct_google_2fprotobuf_2fcompiler_2fplugin_2eproto; + }; + // =================================================================== + + // =================================================================== + +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif // __GNUC__ + // Version + + // optional int32 major = 1; + inline bool Version::_internal_has_major() const + { + bool value = (_impl_._has_bits_[0] & 0x00000002u) != 0; + return value; + } + inline bool Version::has_major() const + { + 
return _internal_has_major(); + } + inline void Version::clear_major() + { + _impl_.major_ = 0; + _impl_._has_bits_[0] &= ~0x00000002u; + } + inline int32_t Version::_internal_major() const + { + return _impl_.major_; + } + inline int32_t Version::major() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.Version.major) + return _internal_major(); + } + inline void Version::_internal_set_major(int32_t value) + { + _impl_._has_bits_[0] |= 0x00000002u; + _impl_.major_ = value; + } + inline void Version::set_major(int32_t value) + { + _internal_set_major(value); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.Version.major) + } + + // optional int32 minor = 2; + inline bool Version::_internal_has_minor() const + { + bool value = (_impl_._has_bits_[0] & 0x00000004u) != 0; + return value; + } + inline bool Version::has_minor() const + { + return _internal_has_minor(); + } + inline void Version::clear_minor() + { + _impl_.minor_ = 0; + _impl_._has_bits_[0] &= ~0x00000004u; + } + inline int32_t Version::_internal_minor() const + { + return _impl_.minor_; + } + inline int32_t Version::minor() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.Version.minor) + return _internal_minor(); + } + inline void Version::_internal_set_minor(int32_t value) + { + _impl_._has_bits_[0] |= 0x00000004u; + _impl_.minor_ = value; + } + inline void Version::set_minor(int32_t value) + { + _internal_set_minor(value); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.Version.minor) + } + + // optional int32 patch = 3; + inline bool Version::_internal_has_patch() const + { + bool value = (_impl_._has_bits_[0] & 0x00000008u) != 0; + return value; + } + inline bool Version::has_patch() const + { + return _internal_has_patch(); + } + inline void Version::clear_patch() + { + _impl_.patch_ = 0; + _impl_._has_bits_[0] &= ~0x00000008u; + } + inline int32_t Version::_internal_patch() const + { + return _impl_.patch_; + } + 
inline int32_t Version::patch() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.Version.patch) + return _internal_patch(); + } + inline void Version::_internal_set_patch(int32_t value) + { + _impl_._has_bits_[0] |= 0x00000008u; + _impl_.patch_ = value; + } + inline void Version::set_patch(int32_t value) + { + _internal_set_patch(value); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.Version.patch) + } + + // optional string suffix = 4; + inline bool Version::_internal_has_suffix() const + { + bool value = (_impl_._has_bits_[0] & 0x00000001u) != 0; + return value; + } + inline bool Version::has_suffix() const + { + return _internal_has_suffix(); + } + inline void Version::clear_suffix() + { + _impl_.suffix_.ClearToEmpty(); + _impl_._has_bits_[0] &= ~0x00000001u; + } + inline const std::string& Version::suffix() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.Version.suffix) + return _internal_suffix(); + } + template + inline PROTOBUF_ALWAYS_INLINE void Version::set_suffix(ArgT0&& arg0, ArgT... 
args) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.suffix_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.Version.suffix) + } + inline std::string* Version::mutable_suffix() + { + std::string* _s = _internal_mutable_suffix(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.Version.suffix) + return _s; + } + inline const std::string& Version::_internal_suffix() const + { + return _impl_.suffix_.Get(); + } + inline void Version::_internal_set_suffix(const std::string& value) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.suffix_.Set(value, GetArenaForAllocation()); + } + inline std::string* Version::_internal_mutable_suffix() + { + _impl_._has_bits_[0] |= 0x00000001u; + return _impl_.suffix_.Mutable(GetArenaForAllocation()); + } + inline std::string* Version::release_suffix() + { + // @@protoc_insertion_point(field_release:google.protobuf.compiler.Version.suffix) + if (!_internal_has_suffix()) + { + return nullptr; + } + _impl_._has_bits_[0] &= ~0x00000001u; + auto* p = _impl_.suffix_.Release(); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.suffix_.IsDefault()) + { + _impl_.suffix_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + return p; + } + inline void Version::set_allocated_suffix(std::string* suffix) + { + if (suffix != nullptr) + { + _impl_._has_bits_[0] |= 0x00000001u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000001u; + } + _impl_.suffix_.SetAllocated(suffix, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.suffix_.IsDefault()) + { + _impl_.suffix_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.Version.suffix) + } + + // ------------------------------------------------------------------- + + // CodeGeneratorRequest + + // repeated string file_to_generate = 
1; + inline int CodeGeneratorRequest::_internal_file_to_generate_size() const + { + return _impl_.file_to_generate_.size(); + } + inline int CodeGeneratorRequest::file_to_generate_size() const + { + return _internal_file_to_generate_size(); + } + inline void CodeGeneratorRequest::clear_file_to_generate() + { + _impl_.file_to_generate_.Clear(); + } + inline std::string* CodeGeneratorRequest::add_file_to_generate() + { + std::string* _s = _internal_add_file_to_generate(); + // @@protoc_insertion_point(field_add_mutable:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + return _s; + } + inline const std::string& CodeGeneratorRequest::_internal_file_to_generate(int index) const + { + return _impl_.file_to_generate_.Get(index); + } + inline const std::string& CodeGeneratorRequest::file_to_generate(int index) const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + return _internal_file_to_generate(index); + } + inline std::string* CodeGeneratorRequest::mutable_file_to_generate(int index) + { + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + return _impl_.file_to_generate_.Mutable(index); + } + inline void CodeGeneratorRequest::set_file_to_generate(int index, const std::string& value) + { + _impl_.file_to_generate_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline void CodeGeneratorRequest::set_file_to_generate(int index, std::string&& value) + { + _impl_.file_to_generate_.Mutable(index)->assign(std::move(value)); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline void CodeGeneratorRequest::set_file_to_generate(int index, const char* value) + { + GOOGLE_DCHECK(value != nullptr); + _impl_.file_to_generate_.Mutable(index)->assign(value); + // 
@@protoc_insertion_point(field_set_char:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline void CodeGeneratorRequest::set_file_to_generate(int index, const char* value, size_t size) + { + _impl_.file_to_generate_.Mutable(index)->assign( + reinterpret_cast(value), size + ); + // @@protoc_insertion_point(field_set_pointer:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline std::string* CodeGeneratorRequest::_internal_add_file_to_generate() + { + return _impl_.file_to_generate_.Add(); + } + inline void CodeGeneratorRequest::add_file_to_generate(const std::string& value) + { + _impl_.file_to_generate_.Add()->assign(value); + // @@protoc_insertion_point(field_add:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline void CodeGeneratorRequest::add_file_to_generate(std::string&& value) + { + _impl_.file_to_generate_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline void CodeGeneratorRequest::add_file_to_generate(const char* value) + { + GOOGLE_DCHECK(value != nullptr); + _impl_.file_to_generate_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline void CodeGeneratorRequest::add_file_to_generate(const char* value, size_t size) + { + _impl_.file_to_generate_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + } + inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& + CodeGeneratorRequest::file_to_generate() const + { + // @@protoc_insertion_point(field_list:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + return _impl_.file_to_generate_; + } + inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* + CodeGeneratorRequest::mutable_file_to_generate() + { + // 
@@protoc_insertion_point(field_mutable_list:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate) + return &_impl_.file_to_generate_; + } + + // optional string parameter = 2; + inline bool CodeGeneratorRequest::_internal_has_parameter() const + { + bool value = (_impl_._has_bits_[0] & 0x00000001u) != 0; + return value; + } + inline bool CodeGeneratorRequest::has_parameter() const + { + return _internal_has_parameter(); + } + inline void CodeGeneratorRequest::clear_parameter() + { + _impl_.parameter_.ClearToEmpty(); + _impl_._has_bits_[0] &= ~0x00000001u; + } + inline const std::string& CodeGeneratorRequest::parameter() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorRequest.parameter) + return _internal_parameter(); + } + template + inline PROTOBUF_ALWAYS_INLINE void CodeGeneratorRequest::set_parameter(ArgT0&& arg0, ArgT... args) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.parameter_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorRequest.parameter) + } + inline std::string* CodeGeneratorRequest::mutable_parameter() + { + std::string* _s = _internal_mutable_parameter(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorRequest.parameter) + return _s; + } + inline const std::string& CodeGeneratorRequest::_internal_parameter() const + { + return _impl_.parameter_.Get(); + } + inline void CodeGeneratorRequest::_internal_set_parameter(const std::string& value) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.parameter_.Set(value, GetArenaForAllocation()); + } + inline std::string* CodeGeneratorRequest::_internal_mutable_parameter() + { + _impl_._has_bits_[0] |= 0x00000001u; + return _impl_.parameter_.Mutable(GetArenaForAllocation()); + } + inline std::string* CodeGeneratorRequest::release_parameter() + { + // 
@@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorRequest.parameter) + if (!_internal_has_parameter()) + { + return nullptr; + } + _impl_._has_bits_[0] &= ~0x00000001u; + auto* p = _impl_.parameter_.Release(); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.parameter_.IsDefault()) + { + _impl_.parameter_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + return p; + } + inline void CodeGeneratorRequest::set_allocated_parameter(std::string* parameter) + { + if (parameter != nullptr) + { + _impl_._has_bits_[0] |= 0x00000001u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000001u; + } + _impl_.parameter_.SetAllocated(parameter, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.parameter_.IsDefault()) + { + _impl_.parameter_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorRequest.parameter) + } + + // repeated .google.protobuf.FileDescriptorProto proto_file = 15; + inline int CodeGeneratorRequest::_internal_proto_file_size() const + { + return _impl_.proto_file_.size(); + } + inline int CodeGeneratorRequest::proto_file_size() const + { + return _internal_proto_file_size(); + } + inline ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto* CodeGeneratorRequest::mutable_proto_file(int index) + { + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorRequest.proto_file) + return _impl_.proto_file_.Mutable(index); + } + inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::FileDescriptorProto>* + CodeGeneratorRequest::mutable_proto_file() + { + // @@protoc_insertion_point(field_mutable_list:google.protobuf.compiler.CodeGeneratorRequest.proto_file) + return &_impl_.proto_file_; + } + inline const ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto& CodeGeneratorRequest::_internal_proto_file(int index) const + { + return 
_impl_.proto_file_.Get(index); + } + inline const ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto& CodeGeneratorRequest::proto_file(int index) const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorRequest.proto_file) + return _internal_proto_file(index); + } + inline ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto* CodeGeneratorRequest::_internal_add_proto_file() + { + return _impl_.proto_file_.Add(); + } + inline ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto* CodeGeneratorRequest::add_proto_file() + { + ::PROTOBUF_NAMESPACE_ID::FileDescriptorProto* _add = _internal_add_proto_file(); + // @@protoc_insertion_point(field_add:google.protobuf.compiler.CodeGeneratorRequest.proto_file) + return _add; + } + inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::FileDescriptorProto>& + CodeGeneratorRequest::proto_file() const + { + // @@protoc_insertion_point(field_list:google.protobuf.compiler.CodeGeneratorRequest.proto_file) + return _impl_.proto_file_; + } + + // optional .google.protobuf.compiler.Version compiler_version = 3; + inline bool CodeGeneratorRequest::_internal_has_compiler_version() const + { + bool value = (_impl_._has_bits_[0] & 0x00000002u) != 0; + PROTOBUF_ASSUME(!value || _impl_.compiler_version_ != nullptr); + return value; + } + inline bool CodeGeneratorRequest::has_compiler_version() const + { + return _internal_has_compiler_version(); + } + inline void CodeGeneratorRequest::clear_compiler_version() + { + if (_impl_.compiler_version_ != nullptr) + _impl_.compiler_version_->Clear(); + _impl_._has_bits_[0] &= ~0x00000002u; + } + inline const ::PROTOBUF_NAMESPACE_ID::compiler::Version& CodeGeneratorRequest::_internal_compiler_version() const + { + const ::PROTOBUF_NAMESPACE_ID::compiler::Version* p = _impl_.compiler_version_; + return p != nullptr ? 
*p : reinterpret_cast(::PROTOBUF_NAMESPACE_ID::compiler::_Version_default_instance_); + } + inline const ::PROTOBUF_NAMESPACE_ID::compiler::Version& CodeGeneratorRequest::compiler_version() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorRequest.compiler_version) + return _internal_compiler_version(); + } + inline void CodeGeneratorRequest::unsafe_arena_set_allocated_compiler_version( + ::PROTOBUF_NAMESPACE_ID::compiler::Version* compiler_version + ) + { + if (GetArenaForAllocation() == nullptr) + { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.compiler_version_); + } + _impl_.compiler_version_ = compiler_version; + if (compiler_version) + { + _impl_._has_bits_[0] |= 0x00000002u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000002u; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.compiler.CodeGeneratorRequest.compiler_version) + } + inline ::PROTOBUF_NAMESPACE_ID::compiler::Version* CodeGeneratorRequest::release_compiler_version() + { + _impl_._has_bits_[0] &= ~0x00000002u; + ::PROTOBUF_NAMESPACE_ID::compiler::Version* temp = _impl_.compiler_version_; + _impl_.compiler_version_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) + { + delete old; + } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) + { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; + } + inline ::PROTOBUF_NAMESPACE_ID::compiler::Version* CodeGeneratorRequest::unsafe_arena_release_compiler_version() + { + // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorRequest.compiler_version) + _impl_._has_bits_[0] &= ~0x00000002u; + ::PROTOBUF_NAMESPACE_ID::compiler::Version* temp 
= _impl_.compiler_version_; + _impl_.compiler_version_ = nullptr; + return temp; + } + inline ::PROTOBUF_NAMESPACE_ID::compiler::Version* CodeGeneratorRequest::_internal_mutable_compiler_version() + { + _impl_._has_bits_[0] |= 0x00000002u; + if (_impl_.compiler_version_ == nullptr) + { + auto* p = CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::compiler::Version>(GetArenaForAllocation()); + _impl_.compiler_version_ = p; + } + return _impl_.compiler_version_; + } + inline ::PROTOBUF_NAMESPACE_ID::compiler::Version* CodeGeneratorRequest::mutable_compiler_version() + { + ::PROTOBUF_NAMESPACE_ID::compiler::Version* _msg = _internal_mutable_compiler_version(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorRequest.compiler_version) + return _msg; + } + inline void CodeGeneratorRequest::set_allocated_compiler_version(::PROTOBUF_NAMESPACE_ID::compiler::Version* compiler_version) + { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) + { + delete _impl_.compiler_version_; + } + if (compiler_version) + { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(compiler_version); + if (message_arena != submessage_arena) + { + compiler_version = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, compiler_version, submessage_arena + ); + } + _impl_._has_bits_[0] |= 0x00000002u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000002u; + } + _impl_.compiler_version_ = compiler_version; + // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorRequest.compiler_version) + } + + // ------------------------------------------------------------------- + + // CodeGeneratorResponse_File + + // optional string name = 1; + inline bool CodeGeneratorResponse_File::_internal_has_name() const + { + bool value = (_impl_._has_bits_[0] & 0x00000001u) != 0; + return value; + } + inline bool 
CodeGeneratorResponse_File::has_name() const + { + return _internal_has_name(); + } + inline void CodeGeneratorResponse_File::clear_name() + { + _impl_.name_.ClearToEmpty(); + _impl_._has_bits_[0] &= ~0x00000001u; + } + inline const std::string& CodeGeneratorResponse_File::name() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.File.name) + return _internal_name(); + } + template + inline PROTOBUF_ALWAYS_INLINE void CodeGeneratorResponse_File::set_name(ArgT0&& arg0, ArgT... args) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.name_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.File.name) + } + inline std::string* CodeGeneratorResponse_File::mutable_name() + { + std::string* _s = _internal_mutable_name(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.File.name) + return _s; + } + inline const std::string& CodeGeneratorResponse_File::_internal_name() const + { + return _impl_.name_.Get(); + } + inline void CodeGeneratorResponse_File::_internal_set_name(const std::string& value) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.name_.Set(value, GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse_File::_internal_mutable_name() + { + _impl_._has_bits_[0] |= 0x00000001u; + return _impl_.name_.Mutable(GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse_File::release_name() + { + // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.File.name) + if (!_internal_has_name()) + { + return nullptr; + } + _impl_._has_bits_[0] &= ~0x00000001u; + auto* p = _impl_.name_.Release(); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) + { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + return p; + } + inline void 
CodeGeneratorResponse_File::set_allocated_name(std::string* name) + { + if (name != nullptr) + { + _impl_._has_bits_[0] |= 0x00000001u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000001u; + } + _impl_.name_.SetAllocated(name, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.name_.IsDefault()) + { + _impl_.name_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.name) + } + + // optional string insertion_point = 2; + inline bool CodeGeneratorResponse_File::_internal_has_insertion_point() const + { + bool value = (_impl_._has_bits_[0] & 0x00000002u) != 0; + return value; + } + inline bool CodeGeneratorResponse_File::has_insertion_point() const + { + return _internal_has_insertion_point(); + } + inline void CodeGeneratorResponse_File::clear_insertion_point() + { + _impl_.insertion_point_.ClearToEmpty(); + _impl_._has_bits_[0] &= ~0x00000002u; + } + inline const std::string& CodeGeneratorResponse_File::insertion_point() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point) + return _internal_insertion_point(); + } + template + inline PROTOBUF_ALWAYS_INLINE void CodeGeneratorResponse_File::set_insertion_point(ArgT0&& arg0, ArgT... 
args) + { + _impl_._has_bits_[0] |= 0x00000002u; + _impl_.insertion_point_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point) + } + inline std::string* CodeGeneratorResponse_File::mutable_insertion_point() + { + std::string* _s = _internal_mutable_insertion_point(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point) + return _s; + } + inline const std::string& CodeGeneratorResponse_File::_internal_insertion_point() const + { + return _impl_.insertion_point_.Get(); + } + inline void CodeGeneratorResponse_File::_internal_set_insertion_point(const std::string& value) + { + _impl_._has_bits_[0] |= 0x00000002u; + _impl_.insertion_point_.Set(value, GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse_File::_internal_mutable_insertion_point() + { + _impl_._has_bits_[0] |= 0x00000002u; + return _impl_.insertion_point_.Mutable(GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse_File::release_insertion_point() + { + // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point) + if (!_internal_has_insertion_point()) + { + return nullptr; + } + _impl_._has_bits_[0] &= ~0x00000002u; + auto* p = _impl_.insertion_point_.Release(); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.insertion_point_.IsDefault()) + { + _impl_.insertion_point_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + return p; + } + inline void CodeGeneratorResponse_File::set_allocated_insertion_point(std::string* insertion_point) + { + if (insertion_point != nullptr) + { + _impl_._has_bits_[0] |= 0x00000002u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000002u; + } + _impl_.insertion_point_.SetAllocated(insertion_point, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if 
(_impl_.insertion_point_.IsDefault()) + { + _impl_.insertion_point_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point) + } + + // optional string content = 15; + inline bool CodeGeneratorResponse_File::_internal_has_content() const + { + bool value = (_impl_._has_bits_[0] & 0x00000004u) != 0; + return value; + } + inline bool CodeGeneratorResponse_File::has_content() const + { + return _internal_has_content(); + } + inline void CodeGeneratorResponse_File::clear_content() + { + _impl_.content_.ClearToEmpty(); + _impl_._has_bits_[0] &= ~0x00000004u; + } + inline const std::string& CodeGeneratorResponse_File::content() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.File.content) + return _internal_content(); + } + template + inline PROTOBUF_ALWAYS_INLINE void CodeGeneratorResponse_File::set_content(ArgT0&& arg0, ArgT... 
args) + { + _impl_._has_bits_[0] |= 0x00000004u; + _impl_.content_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.File.content) + } + inline std::string* CodeGeneratorResponse_File::mutable_content() + { + std::string* _s = _internal_mutable_content(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.File.content) + return _s; + } + inline const std::string& CodeGeneratorResponse_File::_internal_content() const + { + return _impl_.content_.Get(); + } + inline void CodeGeneratorResponse_File::_internal_set_content(const std::string& value) + { + _impl_._has_bits_[0] |= 0x00000004u; + _impl_.content_.Set(value, GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse_File::_internal_mutable_content() + { + _impl_._has_bits_[0] |= 0x00000004u; + return _impl_.content_.Mutable(GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse_File::release_content() + { + // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.File.content) + if (!_internal_has_content()) + { + return nullptr; + } + _impl_._has_bits_[0] &= ~0x00000004u; + auto* p = _impl_.content_.Release(); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.content_.IsDefault()) + { + _impl_.content_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + return p; + } + inline void CodeGeneratorResponse_File::set_allocated_content(std::string* content) + { + if (content != nullptr) + { + _impl_._has_bits_[0] |= 0x00000004u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000004u; + } + _impl_.content_.SetAllocated(content, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.content_.IsDefault()) + { + _impl_.content_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // 
@@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.content) + } + + // optional .google.protobuf.GeneratedCodeInfo generated_code_info = 16; + inline bool CodeGeneratorResponse_File::_internal_has_generated_code_info() const + { + bool value = (_impl_._has_bits_[0] & 0x00000008u) != 0; + PROTOBUF_ASSUME(!value || _impl_.generated_code_info_ != nullptr); + return value; + } + inline bool CodeGeneratorResponse_File::has_generated_code_info() const + { + return _internal_has_generated_code_info(); + } + inline const ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo& CodeGeneratorResponse_File::_internal_generated_code_info() const + { + const ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* p = _impl_.generated_code_info_; + return p != nullptr ? *p : reinterpret_cast(::PROTOBUF_NAMESPACE_ID::_GeneratedCodeInfo_default_instance_); + } + inline const ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo& CodeGeneratorResponse_File::generated_code_info() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info) + return _internal_generated_code_info(); + } + inline void CodeGeneratorResponse_File::unsafe_arena_set_allocated_generated_code_info( + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* generated_code_info + ) + { + if (GetArenaForAllocation() == nullptr) + { + delete reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.generated_code_info_); + } + _impl_.generated_code_info_ = generated_code_info; + if (generated_code_info) + { + _impl_._has_bits_[0] |= 0x00000008u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000008u; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info) + } + inline ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* CodeGeneratorResponse_File::release_generated_code_info() + { + _impl_._has_bits_[0] &= ~0x00000008u; + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* temp = 
_impl_.generated_code_info_; + _impl_.generated_code_info_ = nullptr; +#ifdef PROTOBUF_FORCE_COPY_IN_RELEASE + auto* old = reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(temp); + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + if (GetArenaForAllocation() == nullptr) + { + delete old; + } +#else // PROTOBUF_FORCE_COPY_IN_RELEASE + if (GetArenaForAllocation() != nullptr) + { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } +#endif // !PROTOBUF_FORCE_COPY_IN_RELEASE + return temp; + } + inline ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* CodeGeneratorResponse_File::unsafe_arena_release_generated_code_info() + { + // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info) + _impl_._has_bits_[0] &= ~0x00000008u; + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* temp = _impl_.generated_code_info_; + _impl_.generated_code_info_ = nullptr; + return temp; + } + inline ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* CodeGeneratorResponse_File::_internal_mutable_generated_code_info() + { + _impl_._has_bits_[0] |= 0x00000008u; + if (_impl_.generated_code_info_ == nullptr) + { + auto* p = CreateMaybeMessage<::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo>(GetArenaForAllocation()); + _impl_.generated_code_info_ = p; + } + return _impl_.generated_code_info_; + } + inline ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* CodeGeneratorResponse_File::mutable_generated_code_info() + { + ::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* _msg = _internal_mutable_generated_code_info(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info) + return _msg; + } + inline void CodeGeneratorResponse_File::set_allocated_generated_code_info(::PROTOBUF_NAMESPACE_ID::GeneratedCodeInfo* generated_code_info) + { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + if (message_arena == nullptr) + { + delete 
reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(_impl_.generated_code_info_); + } + if (generated_code_info) + { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena( + reinterpret_cast<::PROTOBUF_NAMESPACE_ID::MessageLite*>(generated_code_info) + ); + if (message_arena != submessage_arena) + { + generated_code_info = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, generated_code_info, submessage_arena + ); + } + _impl_._has_bits_[0] |= 0x00000008u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000008u; + } + _impl_.generated_code_info_ = generated_code_info; + // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info) + } + + // ------------------------------------------------------------------- + + // CodeGeneratorResponse + + // optional string error = 1; + inline bool CodeGeneratorResponse::_internal_has_error() const + { + bool value = (_impl_._has_bits_[0] & 0x00000001u) != 0; + return value; + } + inline bool CodeGeneratorResponse::has_error() const + { + return _internal_has_error(); + } + inline void CodeGeneratorResponse::clear_error() + { + _impl_.error_.ClearToEmpty(); + _impl_._has_bits_[0] &= ~0x00000001u; + } + inline const std::string& CodeGeneratorResponse::error() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.error) + return _internal_error(); + } + template + inline PROTOBUF_ALWAYS_INLINE void CodeGeneratorResponse::set_error(ArgT0&& arg0, ArgT... 
args) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.error_.Set(static_cast(arg0), args..., GetArenaForAllocation()); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.error) + } + inline std::string* CodeGeneratorResponse::mutable_error() + { + std::string* _s = _internal_mutable_error(); + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.error) + return _s; + } + inline const std::string& CodeGeneratorResponse::_internal_error() const + { + return _impl_.error_.Get(); + } + inline void CodeGeneratorResponse::_internal_set_error(const std::string& value) + { + _impl_._has_bits_[0] |= 0x00000001u; + _impl_.error_.Set(value, GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse::_internal_mutable_error() + { + _impl_._has_bits_[0] |= 0x00000001u; + return _impl_.error_.Mutable(GetArenaForAllocation()); + } + inline std::string* CodeGeneratorResponse::release_error() + { + // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.error) + if (!_internal_has_error()) + { + return nullptr; + } + _impl_._has_bits_[0] &= ~0x00000001u; + auto* p = _impl_.error_.Release(); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.error_.IsDefault()) + { + _impl_.error_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + return p; + } + inline void CodeGeneratorResponse::set_allocated_error(std::string* error) + { + if (error != nullptr) + { + _impl_._has_bits_[0] |= 0x00000001u; + } + else + { + _impl_._has_bits_[0] &= ~0x00000001u; + } + _impl_.error_.SetAllocated(error, GetArenaForAllocation()); +#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING + if (_impl_.error_.IsDefault()) + { + _impl_.error_.Set("", GetArenaForAllocation()); + } +#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING + // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.error) + } + + // optional uint64 
supported_features = 2; + inline bool CodeGeneratorResponse::_internal_has_supported_features() const + { + bool value = (_impl_._has_bits_[0] & 0x00000002u) != 0; + return value; + } + inline bool CodeGeneratorResponse::has_supported_features() const + { + return _internal_has_supported_features(); + } + inline void CodeGeneratorResponse::clear_supported_features() + { + _impl_.supported_features_ = uint64_t{0u}; + _impl_._has_bits_[0] &= ~0x00000002u; + } + inline uint64_t CodeGeneratorResponse::_internal_supported_features() const + { + return _impl_.supported_features_; + } + inline uint64_t CodeGeneratorResponse::supported_features() const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.supported_features) + return _internal_supported_features(); + } + inline void CodeGeneratorResponse::_internal_set_supported_features(uint64_t value) + { + _impl_._has_bits_[0] |= 0x00000002u; + _impl_.supported_features_ = value; + } + inline void CodeGeneratorResponse::set_supported_features(uint64_t value) + { + _internal_set_supported_features(value); + // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.supported_features) + } + + // repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15; + inline int CodeGeneratorResponse::_internal_file_size() const + { + return _impl_.file_.size(); + } + inline int CodeGeneratorResponse::file_size() const + { + return _internal_file_size(); + } + inline void CodeGeneratorResponse::clear_file() + { + _impl_.file_.Clear(); + } + inline ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* CodeGeneratorResponse::mutable_file(int index) + { + // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.file) + return _impl_.file_.Mutable(index); + } + inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File>* + CodeGeneratorResponse::mutable_file() + { + // 
@@protoc_insertion_point(field_mutable_list:google.protobuf.compiler.CodeGeneratorResponse.file) + return &_impl_.file_; + } + inline const ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File& CodeGeneratorResponse::_internal_file(int index) const + { + return _impl_.file_.Get(index); + } + inline const ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File& CodeGeneratorResponse::file(int index) const + { + // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.file) + return _internal_file(index); + } + inline ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* CodeGeneratorResponse::_internal_add_file() + { + return _impl_.file_.Add(); + } + inline ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* CodeGeneratorResponse::add_file() + { + ::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File* _add = _internal_add_file(); + // @@protoc_insertion_point(field_add:google.protobuf.compiler.CodeGeneratorResponse.file) + return _add; + } + inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_File>& + CodeGeneratorResponse::file() const + { + // @@protoc_insertion_point(field_list:google.protobuf.compiler.CodeGeneratorResponse.file) + return _impl_.file_; + } + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif // __GNUC__ + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // @@protoc_insertion_point(namespace_scope) + +} // namespace compiler +PROTOBUF_NAMESPACE_CLOSE + +PROTOBUF_NAMESPACE_OPEN + +template<> +struct is_proto_enum<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_Feature> : ::std::true_type +{ +}; +template<> +inline const EnumDescriptor* GetEnumDescriptor<::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_Feature>() +{ + return 
::PROTOBUF_NAMESPACE_ID::compiler::CodeGeneratorResponse_Feature_descriptor(); +} + +PROTOBUF_NAMESPACE_CLOSE + +// @@protoc_insertion_point(global_scope) + +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_google_2fprotobuf_2fcompiler_2fplugin_2eproto diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.proto b/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.proto new file mode 100644 index 00000000..9242aacc --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.proto @@ -0,0 +1,183 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; + +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "google.golang.org/protobuf/types/pluginpb"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. 
The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. 
+ optional uint64 supported_features = 2; + + // Sync with code_generator.h. + enum Feature { + FEATURE_NONE = 0; + FEATURE_PROTO3_OPTIONAL = 1; + } + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. 
+ // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. + optional GeneratedCodeInfo generated_code_info = 16; + } + repeated File file = 15; +} diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/python/generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/python/generator.h new file mode 100644 index 00000000..c6b72434 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/python/generator.h @@ -0,0 +1,182 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: robinson@google.com (Will Robinson) +// +// Generates Python code for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__ + +#include + +#include +#include + +// Must be included last. 
+#include + +namespace google +{ + namespace protobuf + { + + class Descriptor; + class EnumDescriptor; + class EnumValueDescriptor; + class FieldDescriptor; + class OneofDescriptor; + class ServiceDescriptor; + + namespace io + { + class Printer; + } + + namespace compiler + { + namespace python + { + + // CodeGenerator implementation for generated Python protocol buffer classes. + // If you create your own protocol compiler binary and you want it to support + // Python output, you can do so by registering an instance of this + // CodeGenerator with the CommandLineInterface in your main() function. + class PROTOC_EXPORT Generator : public CodeGenerator + { + public: + Generator(); + ~Generator() override; + + // CodeGenerator methods. + bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* generator_context, std::string* error) const override; + + uint64_t GetSupportedFeatures() const override; + + private: + void PrintImports() const; + void PrintFileDescriptor() const; + void PrintAllNestedEnumsInFile() const; + void PrintNestedEnums(const Descriptor& descriptor) const; + void PrintEnum(const EnumDescriptor& enum_descriptor) const; + + void PrintFieldDescriptor(const FieldDescriptor& field, bool is_extension) const; + void PrintFieldDescriptorsInDescriptor( + const Descriptor& message_descriptor, bool is_extension, const std::string& list_variable_name, int (Descriptor::*CountFn)() const, const FieldDescriptor* (Descriptor::*GetterFn)(int) const + ) const; + void PrintFieldsInDescriptor(const Descriptor& message_descriptor) const; + void PrintExtensionsInDescriptor(const Descriptor& message_descriptor) const; + void PrintMessageDescriptors() const; + void PrintDescriptor(const Descriptor& message_descriptor) const; + void PrintNestedDescriptors(const Descriptor& containing_descriptor) const; + + void PrintMessages() const; + void PrintMessage(const Descriptor& message_descriptor, const std::string& prefix, std::vector* 
to_register, bool is_nested) const; + void PrintNestedMessages(const Descriptor& containing_descriptor, const std::string& prefix, std::vector* to_register) const; + + void FixForeignFieldsInDescriptors() const; + void FixForeignFieldsInDescriptor( + const Descriptor& descriptor, + const Descriptor* containing_descriptor + ) const; + void FixForeignFieldsInField(const Descriptor* containing_type, const FieldDescriptor& field, const std::string& python_dict_name) const; + void AddMessageToFileDescriptor(const Descriptor& descriptor) const; + void AddEnumToFileDescriptor(const EnumDescriptor& descriptor) const; + void AddExtensionToFileDescriptor(const FieldDescriptor& descriptor) const; + void AddServiceToFileDescriptor(const ServiceDescriptor& descriptor) const; + std::string FieldReferencingExpression( + const Descriptor* containing_type, const FieldDescriptor& field, const std::string& python_dict_name + ) const; + template + void FixContainingTypeInDescriptor( + const DescriptorT& descriptor, + const Descriptor* containing_descriptor + ) const; + + void FixForeignFieldsInExtensions() const; + void FixForeignFieldsInExtension( + const FieldDescriptor& extension_field + ) const; + void FixForeignFieldsInNestedExtensions(const Descriptor& descriptor) const; + + void PrintServices() const; + void PrintServiceDescriptors() const; + void PrintServiceDescriptor(const ServiceDescriptor& descriptor) const; + void PrintServiceClass(const ServiceDescriptor& descriptor) const; + void PrintServiceStub(const ServiceDescriptor& descriptor) const; + void PrintDescriptorKeyAndModuleName( + const ServiceDescriptor& descriptor + ) const; + + void PrintEnumValueDescriptor(const EnumValueDescriptor& descriptor) const; + std::string OptionsValue(const std::string& serialized_options) const; + bool GeneratingDescriptorProto() const; + + template + std::string ModuleLevelDescriptorName(const DescriptorT& descriptor) const; + std::string ModuleLevelMessageName(const Descriptor& 
descriptor) const; + std::string ModuleLevelServiceDescriptorName( + const ServiceDescriptor& descriptor + ) const; + + template + void PrintSerializedPbInterval(const DescriptorT& descriptor, DescriptorProtoT& proto, const std::string& name) const; + + void FixAllDescriptorOptions() const; + void FixOptionsForField(const FieldDescriptor& field) const; + void FixOptionsForOneof(const OneofDescriptor& oneof) const; + void FixOptionsForEnum(const EnumDescriptor& descriptor) const; + void FixOptionsForService(const ServiceDescriptor& descriptor) const; + void FixOptionsForMessage(const Descriptor& descriptor) const; + + void SetSerializedPbInterval() const; + void SetMessagePbInterval(const Descriptor& descriptor) const; + + void CopyPublicDependenciesAliases(const std::string& copy_from, const FileDescriptor* file) const; + + // Very coarse-grained lock to ensure that Generate() is reentrant. + // Guards file_, printer_ and file_descriptor_serialized_. + mutable Mutex mutex_; + mutable const FileDescriptor* file_; // Set in Generate(). Under mutex_. + mutable std::string file_descriptor_serialized_; + mutable io::Printer* printer_; // Set in Generate(). Under mutex_. + mutable bool pure_python_workable_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Generator); + }; + + } // namespace python + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/python/pyi_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/python/pyi_generator.h new file mode 100644 index 00000000..ac02dd6c --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/python/pyi_generator.h @@ -0,0 +1,120 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: jieluo@google.com (Jie Luo) +// +// Generates Python stub (.pyi) for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_PYTHON_PYI_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_PYTHON_PYI_GENERATOR_H__ + +#include +#include +#include + +#include +#include + +// Must be included last. 
+#include + +namespace google +{ + namespace protobuf + { + class Descriptor; + class EnumDescriptor; + class FieldDescriptor; + class MethodDescriptor; + class ServiceDescriptor; + + namespace io + { + class Printer; + } + + namespace compiler + { + namespace python + { + + class PROTOC_EXPORT PyiGenerator : public google::protobuf::compiler::CodeGenerator + { + public: + PyiGenerator(); + ~PyiGenerator() override; + + // CodeGenerator methods. + uint64_t GetSupportedFeatures() const override + { + // Code generators must explicitly support proto3 optional. + return CodeGenerator::FEATURE_PROTO3_OPTIONAL; + } + bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* generator_context, std::string* error) const override; + + private: + void PrintImportForDescriptor(const FileDescriptor& desc, std::map* import_map, std::set* seen_aliases) const; + void PrintImports(std::map* item_map, std::map* import_map) const; + void PrintEnum(const EnumDescriptor& enum_descriptor) const; + void AddEnumValue(const EnumDescriptor& enum_descriptor, std::map* item_map, const std::map& import_map) const; + void PrintTopLevelEnums() const; + template + void AddExtensions(const DescriptorT& descriptor, std::map* item_map) const; + void PrintMessages( + const std::map& import_map + ) const; + void PrintMessage(const Descriptor& message_descriptor, bool is_nested, const std::map& import_map) const; + void PrintServices() const; + void PrintItemMap(const std::map& item_map) const; + std::string GetFieldType( + const FieldDescriptor& field_des, const Descriptor& containing_des, const std::map& import_map + ) const; + template + std::string ModuleLevelName( + const DescriptorT& descriptor, + const std::map& import_map + ) const; + + // Very coarse-grained lock to ensure that Generate() is reentrant. + // Guards file_ and printer_. + mutable Mutex mutex_; + mutable const FileDescriptor* file_; // Set in Generate(). Under mutex_. 
+ mutable io::Printer* printer_; // Set in Generate(). Under mutex_. + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(PyiGenerator); + }; + + } // namespace python + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_PYTHON_PYI_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/python/python_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/python/python_generator.h new file mode 100644 index 00000000..21d48cd9 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/python/python_generator.h @@ -0,0 +1,6 @@ +#ifndef GOOGLE_PROTOBUF_COMPILER_PYTHON_PYTHON_GENERATOR_H_ +#define GOOGLE_PROTOBUF_COMPILER_PYTHON_PYTHON_GENERATOR_H_ + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_PYTHON_PYTHON_GENERATOR_H_ diff --git a/CAPI/cpp/grpc/include/google/protobuf/compiler/ruby/ruby_generator.h b/CAPI/cpp/grpc/include/google/protobuf/compiler/ruby/ruby_generator.h new file mode 100644 index 00000000..4acbd3e0 --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/compiler/ruby/ruby_generator.h @@ -0,0 +1,71 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Generates Ruby code for a given .proto file. + +#ifndef GOOGLE_PROTOBUF_COMPILER_RUBY_GENERATOR_H__ +#define GOOGLE_PROTOBUF_COMPILER_RUBY_GENERATOR_H__ + +#include + +#include + +#include + +namespace google +{ + namespace protobuf + { + namespace compiler + { + namespace ruby + { + + // CodeGenerator implementation for generated Ruby protocol buffer classes. + // If you create your own protocol compiler binary and you want it to support + // Ruby output, you can do so by registering an instance of this + // CodeGenerator with the CommandLineInterface in your main() function. 
+ class PROTOC_EXPORT Generator : public CodeGenerator + { + bool Generate(const FileDescriptor* file, const std::string& parameter, GeneratorContext* generator_context, std::string* error) const override; + uint64_t GetSupportedFeatures() const override + { + return FEATURE_PROTO3_OPTIONAL; + } + }; + + } // namespace ruby + } // namespace compiler + } // namespace protobuf +} // namespace google + +#include + +#endif // GOOGLE_PROTOBUF_COMPILER_RUBY_GENERATOR_H__ diff --git a/CAPI/cpp/grpc/include/google/protobuf/descriptor.h b/CAPI/cpp/grpc/include/google/protobuf/descriptor.h new file mode 100644 index 00000000..54ef064f --- /dev/null +++ b/CAPI/cpp/grpc/include/google/protobuf/descriptor.h @@ -0,0 +1,2583 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// This file contains classes which describe a type of protocol message. +// You can use a message's descriptor to learn at runtime what fields +// it contains and what the types of those fields are. The Message +// interface also allows you to dynamically access and modify individual +// fields by passing the FieldDescriptor of the field you are interested +// in. +// +// Most users will not care about descriptors, because they will write +// code specific to certain protocol types and will simply use the classes +// generated by the protocol compiler directly. Advanced users who want +// to operate on arbitrary types (not known at compile time) may want to +// read descriptors in order to learn about the contents of a message. +// A very small number of users will want to construct their own +// Descriptors, either because they are implementing Message manually or +// because they are writing something like the protocol compiler. +// +// For an example of how you might use descriptors, see the code example +// at the top of message.h. + +#ifndef GOOGLE_PROTOBUF_DESCRIPTOR_H__ +#define GOOGLE_PROTOBUF_DESCRIPTOR_H__ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// Must be included last. 
+#include + +// TYPE_BOOL is defined in the MacOS's ConditionalMacros.h. +#ifdef TYPE_BOOL +#undef TYPE_BOOL +#endif // TYPE_BOOL + +#ifdef SWIG +#define PROTOBUF_EXPORT +#endif + +namespace google +{ + namespace protobuf + { + + // Defined in this file. + class Descriptor; + class FieldDescriptor; + class OneofDescriptor; + class EnumDescriptor; + class EnumValueDescriptor; + class ServiceDescriptor; + class MethodDescriptor; + class FileDescriptor; + class DescriptorDatabase; + class DescriptorPool; + + // Defined in descriptor.proto + class DescriptorProto; + class DescriptorProto_ExtensionRange; + class FieldDescriptorProto; + class OneofDescriptorProto; + class EnumDescriptorProto; + class EnumValueDescriptorProto; + class ServiceDescriptorProto; + class MethodDescriptorProto; + class FileDescriptorProto; + class MessageOptions; + class FieldOptions; + class OneofOptions; + class EnumOptions; + class EnumValueOptions; + class ExtensionRangeOptions; + class ServiceOptions; + class MethodOptions; + class FileOptions; + class UninterpretedOption; + class SourceCodeInfo; + + // Defined in message.h + class Message; + class Reflection; + + // Defined in descriptor.cc + class DescriptorBuilder; + class FileDescriptorTables; + class Symbol; + + // Defined in unknown_field_set.h. + class UnknownField; + + // Defined in command_line_interface.cc + namespace compiler + { + class CommandLineInterface; + namespace cpp + { + // Defined in helpers.h + class Formatter; + } // namespace cpp + } // namespace compiler + + namespace descriptor_unittest + { + class DescriptorTest; + } // namespace descriptor_unittest + + // Defined in printer.h + namespace io + { + class Printer; + } // namespace io + + // NB, all indices are zero-based. + struct SourceLocation + { + int start_line; + int end_line; + int start_column; + int end_column; + + // Doc comments found at the source location. + // See the comments in SourceCodeInfo.Location (descriptor.proto) for details. 
+ std::string leading_comments; + std::string trailing_comments; + std::vector leading_detached_comments; + }; + + // Options when generating machine-parsable output from a descriptor with + // DebugString(). + struct DebugStringOptions + { + // include original user comments as recorded in SourceLocation entries. N.B. + // that this must be |false| by default: several other pieces of code (for + // example, the C++ code generation for fields in the proto compiler) rely on + // DebugString() output being unobstructed by user comments. + bool include_comments; + // If true, elide the braced body in the debug string. + bool elide_group_body; + bool elide_oneof_body; + + DebugStringOptions() : + include_comments(false), + elide_group_body(false), + elide_oneof_body(false) + { + } + }; + + // A class to handle the simplest cases of a lazily linked descriptor + // for a message type that isn't built at the time of cross linking, + // which is needed when a pool has lazily_build_dependencies_ set. + // Must be instantiated as mutable in a descriptor. + namespace internal + { + + // The classes in this file represent a significant memory footprint for the + // library. We make sure we are not accidentally making them larger by + // hardcoding the struct size for a specific platform. Use as: + // + // PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(type, expected_size_in_x84-64); + // + +#if !defined(PROTOBUF_INTERNAL_CHECK_CLASS_SIZE) +#define PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(t, expected) +#endif + + class FlatAllocator; + + class PROTOBUF_EXPORT LazyDescriptor + { + public: + // Init function to be called at init time of a descriptor containing + // a LazyDescriptor. + void Init() + { + descriptor_ = nullptr; + once_ = nullptr; + } + + // Sets the value of the descriptor if it is known during the descriptor + // building process. Not thread safe, should only be called during the + // descriptor build process. Should not be called after SetLazy has been + // called. 
+ void Set(const Descriptor* descriptor); + + // Sets the information needed to lazily cross link the descriptor at a later + // time, SetLazy is not thread safe, should be called only once at descriptor + // build time if the symbol wasn't found and building of the file containing + // that type is delayed because lazily_build_dependencies_ is set on the pool. + // Should not be called after Set() has been called. + void SetLazy(StringPiece name, const FileDescriptor* file); + + // Returns the current value of the descriptor, thread-safe. If SetLazy(...) + // has been called, will do a one-time cross link of the type specified, + // building the descriptor file that contains the type if necessary. + inline const Descriptor* Get(const ServiceDescriptor* service) + { + Once(service); + return descriptor_; + } + + private: + void Once(const ServiceDescriptor* service); + + const Descriptor* descriptor_; + // The once_ flag is followed by a NUL terminated string for the type name. + internal::once_flag* once_; + }; + + class PROTOBUF_EXPORT SymbolBase + { + private: + friend class google::protobuf::Symbol; + uint8_t symbol_type_; + }; + + // Some types have more than one SymbolBase because they have multiple + // identities in the table. We can't have duplicate direct bases, so we use this + // intermediate base to do so. + // See BuildEnumValue for details. + template + class PROTOBUF_EXPORT SymbolBaseN : public SymbolBase + { + }; + + } // namespace internal + + // Describes a type of protocol message, or a particular group within a + // message. To obtain the Descriptor for a given message object, call + // Message::GetDescriptor(). Generated message classes also have a + // static method called descriptor() which returns the type's descriptor. + // Use DescriptorPool to construct your own descriptors. 
+ class PROTOBUF_EXPORT Descriptor : private internal::SymbolBase + { + public: + typedef DescriptorProto Proto; + + // The name of the message type, not including its scope. + const std::string& name() const; + + // The fully-qualified name of the message type, scope delimited by + // periods. For example, message type "Foo" which is declared in package + // "bar" has full name "bar.Foo". If a type "Baz" is nested within + // Foo, Baz's full_name is "bar.Foo.Baz". To get only the part that + // comes after the last '.', use name(). + const std::string& full_name() const; + + // Index of this descriptor within the file or containing type's message + // type array. + int index() const; + + // The .proto file in which this message type was defined. Never nullptr. + const FileDescriptor* file() const; + + // If this Descriptor describes a nested type, this returns the type + // in which it is nested. Otherwise, returns nullptr. + const Descriptor* containing_type() const; + + // Get options for this message type. These are specified in the .proto file + // by placing lines like "option foo = 1234;" in the message definition. + // Allowed options are defined by MessageOptions in descriptor.proto, and any + // available extensions of that message. + const MessageOptions& options() const; + + // Write the contents of this Descriptor into the given DescriptorProto. + // The target DescriptorProto must be clear before calling this; if it + // isn't, the result may be garbage. + void CopyTo(DescriptorProto* proto) const; + + // Write the contents of this descriptor in a human-readable form. Output + // will be suitable for re-parsing. + std::string DebugString() const; + + // Similar to DebugString(), but additionally takes options (e.g., + // include original user comments in output). + std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Returns true if this is a placeholder for an unknown type. 
This will + // only be the case if this descriptor comes from a DescriptorPool + // with AllowUnknownDependencies() set. + bool is_placeholder() const; + + enum WellKnownType + { + WELLKNOWNTYPE_UNSPECIFIED, // Not a well-known type. + + // Wrapper types. + WELLKNOWNTYPE_DOUBLEVALUE, // google.protobuf.DoubleValue + WELLKNOWNTYPE_FLOATVALUE, // google.protobuf.FloatValue + WELLKNOWNTYPE_INT64VALUE, // google.protobuf.Int64Value + WELLKNOWNTYPE_UINT64VALUE, // google.protobuf.UInt64Value + WELLKNOWNTYPE_INT32VALUE, // google.protobuf.Int32Value + WELLKNOWNTYPE_UINT32VALUE, // google.protobuf.UInt32Value + WELLKNOWNTYPE_STRINGVALUE, // google.protobuf.StringValue + WELLKNOWNTYPE_BYTESVALUE, // google.protobuf.BytesValue + WELLKNOWNTYPE_BOOLVALUE, // google.protobuf.BoolValue + + // Other well known types. + WELLKNOWNTYPE_ANY, // google.protobuf.Any + WELLKNOWNTYPE_FIELDMASK, // google.protobuf.FieldMask + WELLKNOWNTYPE_DURATION, // google.protobuf.Duration + WELLKNOWNTYPE_TIMESTAMP, // google.protobuf.Timestamp + WELLKNOWNTYPE_VALUE, // google.protobuf.Value + WELLKNOWNTYPE_LISTVALUE, // google.protobuf.ListValue + WELLKNOWNTYPE_STRUCT, // google.protobuf.Struct + + // New well-known types may be added in the future. + // Please make sure any switch() statements have a 'default' case. + __WELLKNOWNTYPE__DO_NOT_USE__ADD_DEFAULT_INSTEAD__, + }; + + WellKnownType well_known_type() const; + + // Field stuff ----------------------------------------------------- + + // The number of fields in this message type. + int field_count() const; + // Gets a field by index, where 0 <= index < field_count(). + // These are returned in the order they were defined in the .proto file. + const FieldDescriptor* field(int index) const; + + // Looks up a field by declared tag number. Returns nullptr if no such field + // exists. + const FieldDescriptor* FindFieldByNumber(int number) const; + // Looks up a field by name. Returns nullptr if no such field exists. 
+ const FieldDescriptor* FindFieldByName(ConstStringParam name) const; + + // Looks up a field by lowercased name (as returned by lowercase_name()). + // This lookup may be ambiguous if multiple field names differ only by case, + // in which case the field returned is chosen arbitrarily from the matches. + const FieldDescriptor* FindFieldByLowercaseName( + ConstStringParam lowercase_name + ) const; + + // Looks up a field by camel-case name (as returned by camelcase_name()). + // This lookup may be ambiguous if multiple field names differ in a way that + // leads them to have identical camel-case names, in which case the field + // returned is chosen arbitrarily from the matches. + const FieldDescriptor* FindFieldByCamelcaseName( + ConstStringParam camelcase_name + ) const; + + // The number of oneofs in this message type. + int oneof_decl_count() const; + // The number of oneofs in this message type, excluding synthetic oneofs. + // Real oneofs always come first, so iterating up to real_oneof_decl_cout() + // will yield all real oneofs. + int real_oneof_decl_count() const; + // Get a oneof by index, where 0 <= index < oneof_decl_count(). + // These are returned in the order they were defined in the .proto file. + const OneofDescriptor* oneof_decl(int index) const; + + // Looks up a oneof by name. Returns nullptr if no such oneof exists. + const OneofDescriptor* FindOneofByName(ConstStringParam name) const; + + // Nested type stuff ----------------------------------------------- + + // The number of nested types in this message type. + int nested_type_count() const; + // Gets a nested type by index, where 0 <= index < nested_type_count(). + // These are returned in the order they were defined in the .proto file. + const Descriptor* nested_type(int index) const; + + // Looks up a nested type by name. Returns nullptr if no such nested type + // exists. 
+ const Descriptor* FindNestedTypeByName(ConstStringParam name) const; + + // Enum stuff ------------------------------------------------------ + + // The number of enum types in this message type. + int enum_type_count() const; + // Gets an enum type by index, where 0 <= index < enum_type_count(). + // These are returned in the order they were defined in the .proto file. + const EnumDescriptor* enum_type(int index) const; + + // Looks up an enum type by name. Returns nullptr if no such enum type + // exists. + const EnumDescriptor* FindEnumTypeByName(ConstStringParam name) const; + + // Looks up an enum value by name, among all enum types in this message. + // Returns nullptr if no such value exists. + const EnumValueDescriptor* FindEnumValueByName(ConstStringParam name) const; + + // Extensions ------------------------------------------------------ + + // A range of field numbers which are designated for third-party + // extensions. + struct ExtensionRange + { + typedef DescriptorProto_ExtensionRange Proto; + + typedef ExtensionRangeOptions OptionsType; + + // See Descriptor::CopyTo(). + void CopyTo(DescriptorProto_ExtensionRange* proto) const; + + int start; // inclusive + int end; // exclusive + + const ExtensionRangeOptions* options_; + }; + + // The number of extension ranges in this message type. + int extension_range_count() const; + // Gets an extension range by index, where 0 <= index < + // extension_range_count(). These are returned in the order they were defined + // in the .proto file. + const ExtensionRange* extension_range(int index) const; + + // Returns true if the number is in one of the extension ranges. + bool IsExtensionNumber(int number) const; + + // Returns nullptr if no extension range contains the given number. + const ExtensionRange* FindExtensionRangeContainingNumber(int number) const; + + // The number of extensions defined nested within this message type's scope. 
+ // See doc: + // https://developers.google.com/protocol-buffers/docs/proto#nested-extensions + // + // Note that the extensions may be extending *other* messages. + // + // For example: + // message M1 { + // extensions 1 to max; + // } + // + // message M2 { + // extend M1 { + // optional int32 foo = 1; + // } + // } + // + // In this case, + // DescriptorPool::generated_pool() + // ->FindMessageTypeByName("M2") + // ->extension(0) + // will return "foo", even though "foo" is an extension of M1. + // To find all known extensions of a given message, instead use + // DescriptorPool::FindAllExtensions. + int extension_count() const; + // Get an extension by index, where 0 <= index < extension_count(). + // These are returned in the order they were defined in the .proto file. + const FieldDescriptor* extension(int index) const; + + // Looks up a named extension (which extends some *other* message type) + // defined within this message type's scope. + const FieldDescriptor* FindExtensionByName(ConstStringParam name) const; + + // Similar to FindFieldByLowercaseName(), but finds extensions defined within + // this message type's scope. + const FieldDescriptor* FindExtensionByLowercaseName( + ConstStringParam name + ) const; + + // Similar to FindFieldByCamelcaseName(), but finds extensions defined within + // this message type's scope. + const FieldDescriptor* FindExtensionByCamelcaseName( + ConstStringParam name + ) const; + + // Reserved fields ------------------------------------------------- + + // A range of reserved field numbers. + struct ReservedRange + { + int start; // inclusive + int end; // exclusive + }; + + // The number of reserved ranges in this message type. + int reserved_range_count() const; + // Gets an reserved range by index, where 0 <= index < + // reserved_range_count(). These are returned in the order they were defined + // in the .proto file. 
+ const ReservedRange* reserved_range(int index) const; + + // Returns true if the number is in one of the reserved ranges. + bool IsReservedNumber(int number) const; + + // Returns nullptr if no reserved range contains the given number. + const ReservedRange* FindReservedRangeContainingNumber(int number) const; + + // The number of reserved field names in this message type. + int reserved_name_count() const; + + // Gets a reserved name by index, where 0 <= index < reserved_name_count(). + const std::string& reserved_name(int index) const; + + // Returns true if the field name is reserved. + bool IsReservedName(ConstStringParam name) const; + + // Source Location --------------------------------------------------- + + // Updates |*out_location| to the source location of the complete + // extent of this message declaration. Returns false and leaves + // |*out_location| unchanged iff location information was not available. + bool GetSourceLocation(SourceLocation* out_location) const; + + // Maps -------------------------------------------------------------- + + // Returns the FieldDescriptor for the "key" field. If this isn't a map entry + // field, returns nullptr. + const FieldDescriptor* map_key() const; + + // Returns the FieldDescriptor for the "value" field. If this isn't a map + // entry field, returns nullptr. + const FieldDescriptor* map_value() const; + + private: + friend class Symbol; + typedef MessageOptions OptionsType; + + // Allows tests to test CopyTo(proto, true). + friend class descriptor_unittest::DescriptorTest; + + // Allows access to GetLocationPath for annotations. + friend class io::Printer; + friend class compiler::cpp::Formatter; + + // Fill the json_name field of FieldDescriptorProto. + void CopyJsonNameTo(DescriptorProto* proto) const; + + // Internal version of DebugString; controls the level of indenting for + // correct depth. 
Takes |options| to control debug-string options, and + // |include_opening_clause| to indicate whether the "message ... " part of the + // clause has already been generated (this varies depending on context). + void DebugString(int depth, std::string* contents, const DebugStringOptions& options, bool include_opening_clause) const; + + // Walks up the descriptor tree to generate the source location path + // to this descriptor from the file root. + void GetLocationPath(std::vector* output) const; + + // True if this is a placeholder for an unknown type. + bool is_placeholder_ : 1; + // True if this is a placeholder and the type name wasn't fully-qualified. + bool is_unqualified_placeholder_ : 1; + // Well known type. Stored like this to conserve space. + uint8_t well_known_type_ : 5; + + // This points to the last field _number_ that is part of the sequence + // starting at 1, where + // `desc->field(i)->number() == i + 1` + // A value of `0` means no field matches. That is, there are no fields or the + // first field is not field `1`. + // Uses 16-bit to avoid extra padding. Unlikely to have more than 2^16 + // sequentially numbered fields in a message. + uint16_t sequential_field_limit_; + + int field_count_; + + // all_names_ = [name, full_name] + const std::string* all_names_; + const FileDescriptor* file_; + const Descriptor* containing_type_; + const MessageOptions* options_; + + // These arrays are separated from their sizes to minimize padding on 64-bit. 
+ FieldDescriptor* fields_; + OneofDescriptor* oneof_decls_; + Descriptor* nested_types_; + EnumDescriptor* enum_types_; + ExtensionRange* extension_ranges_; + FieldDescriptor* extensions_; + ReservedRange* reserved_ranges_; + const std::string** reserved_names_; + + int oneof_decl_count_; + int real_oneof_decl_count_; + int nested_type_count_; + int enum_type_count_; + int extension_range_count_; + int extension_count_; + int reserved_range_count_; + int reserved_name_count_; + + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() in descriptor.cc + // and update them to initialize the field. + + // Must be constructed using DescriptorPool. + Descriptor() + { + } + friend class DescriptorBuilder; + friend class DescriptorPool; + friend class EnumDescriptor; + friend class FieldDescriptor; + friend class FileDescriptorTables; + friend class OneofDescriptor; + friend class MethodDescriptor; + friend class FileDescriptor; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Descriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(Descriptor, 136); + + // Describes a single field of a message. To get the descriptor for a given + // field, first get the Descriptor for the message in which it is defined, + // then call Descriptor::FindFieldByName(). To get a FieldDescriptor for + // an extension, do one of the following: + // - Get the Descriptor or FileDescriptor for its containing scope, then + // call Descriptor::FindExtensionByName() or + // FileDescriptor::FindExtensionByName(). + // - Given a DescriptorPool, call DescriptorPool::FindExtensionByNumber() or + // DescriptorPool::FindExtensionByPrintableName(). + // Use DescriptorPool to construct your own descriptors. + class PROTOBUF_EXPORT FieldDescriptor : private internal::SymbolBase + { + public: + typedef FieldDescriptorProto Proto; + + // Identifies a field type. 0 is reserved for errors. The order is weird + // for historical reasons. 
Types 12 and up are new in proto2. + enum Type + { + TYPE_DOUBLE = 1, // double, exactly eight bytes on the wire. + TYPE_FLOAT = 2, // float, exactly four bytes on the wire. + TYPE_INT64 = 3, // int64, varint on the wire. Negative numbers + // take 10 bytes. Use TYPE_SINT64 if negative + // values are likely. + TYPE_UINT64 = 4, // uint64, varint on the wire. + TYPE_INT32 = 5, // int32, varint on the wire. Negative numbers + // take 10 bytes. Use TYPE_SINT32 if negative + // values are likely. + TYPE_FIXED64 = 6, // uint64, exactly eight bytes on the wire. + TYPE_FIXED32 = 7, // uint32, exactly four bytes on the wire. + TYPE_BOOL = 8, // bool, varint on the wire. + TYPE_STRING = 9, // UTF-8 text. + TYPE_GROUP = 10, // Tag-delimited message. Deprecated. + TYPE_MESSAGE = 11, // Length-delimited message. + + TYPE_BYTES = 12, // Arbitrary byte array. + TYPE_UINT32 = 13, // uint32, varint on the wire + TYPE_ENUM = 14, // Enum, varint on the wire + TYPE_SFIXED32 = 15, // int32, exactly four bytes on the wire + TYPE_SFIXED64 = 16, // int64, exactly eight bytes on the wire + TYPE_SINT32 = 17, // int32, ZigZag-encoded varint on the wire + TYPE_SINT64 = 18, // int64, ZigZag-encoded varint on the wire + + MAX_TYPE = 18, // Constant useful for defining lookup tables + // indexed by Type. + }; + + // Specifies the C++ data type used to represent the field. There is a + // fixed mapping from Type to CppType where each Type maps to exactly one + // CppType. 0 is reserved for errors. 
+ enum CppType + { + CPPTYPE_INT32 = 1, // TYPE_INT32, TYPE_SINT32, TYPE_SFIXED32 + CPPTYPE_INT64 = 2, // TYPE_INT64, TYPE_SINT64, TYPE_SFIXED64 + CPPTYPE_UINT32 = 3, // TYPE_UINT32, TYPE_FIXED32 + CPPTYPE_UINT64 = 4, // TYPE_UINT64, TYPE_FIXED64 + CPPTYPE_DOUBLE = 5, // TYPE_DOUBLE + CPPTYPE_FLOAT = 6, // TYPE_FLOAT + CPPTYPE_BOOL = 7, // TYPE_BOOL + CPPTYPE_ENUM = 8, // TYPE_ENUM + CPPTYPE_STRING = 9, // TYPE_STRING, TYPE_BYTES + CPPTYPE_MESSAGE = 10, // TYPE_MESSAGE, TYPE_GROUP + + MAX_CPPTYPE = 10, // Constant useful for defining lookup tables + // indexed by CppType. + }; + + // Identifies whether the field is optional, required, or repeated. 0 is + // reserved for errors. + enum Label + { + LABEL_OPTIONAL = 1, // optional + LABEL_REQUIRED = 2, // required + LABEL_REPEATED = 3, // repeated + + MAX_LABEL = 3, // Constant useful for defining lookup tables + // indexed by Label. + }; + + // Valid field numbers are positive integers up to kMaxNumber. + static const int kMaxNumber = (1 << 29) - 1; + + // First field number reserved for the protocol buffer library implementation. + // Users may not declare fields that use reserved numbers. + static const int kFirstReservedNumber = 19000; + // Last field number reserved for the protocol buffer library implementation. + // Users may not declare fields that use reserved numbers. + static const int kLastReservedNumber = 19999; + + const std::string& name() const; // Name of this field within the message. + const std::string& full_name() const; // Fully-qualified name of the field. + const std::string& json_name() const; // JSON name of this field. + const FileDescriptor* file() const; // File in which this field was defined. + bool is_extension() const; // Is this an extension field? + int number() const; // Declared tag number. + + // Same as name() except converted to lower-case. 
This (and especially the + // FindFieldByLowercaseName() method) can be useful when parsing formats + // which prefer to use lowercase naming style. (Although, technically + // field names should be lowercased anyway according to the protobuf style + // guide, so this only makes a difference when dealing with old .proto files + // which do not follow the guide.) + const std::string& lowercase_name() const; + + // Same as name() except converted to camel-case. In this conversion, any + // time an underscore appears in the name, it is removed and the next + // letter is capitalized. Furthermore, the first letter of the name is + // lower-cased. Examples: + // FooBar -> fooBar + // foo_bar -> fooBar + // fooBar -> fooBar + // This (and especially the FindFieldByCamelcaseName() method) can be useful + // when parsing formats which prefer to use camel-case naming style. + const std::string& camelcase_name() const; + + Type type() const; // Declared type of this field. + const char* type_name() const; // Name of the declared type. + CppType cpp_type() const; // C++ type of this field. + const char* cpp_type_name() const; // Name of the C++ type. + Label label() const; // optional/required/repeated + + bool is_required() const; // shorthand for label() == LABEL_REQUIRED + bool is_optional() const; // shorthand for label() == LABEL_OPTIONAL + bool is_repeated() const; // shorthand for label() == LABEL_REPEATED + bool is_packable() const; // shorthand for is_repeated() && + // IsTypePackable(type()) + bool is_packed() const; // shorthand for is_packable() && + // options().packed() + bool is_map() const; // shorthand for type() == TYPE_MESSAGE && + // message_type()->options().map_entry() + + // Returns true if this field was syntactically written with "optional" in the + // .proto file. Excludes singular proto3 fields that do not have a label. + bool has_optional_keyword() const; + + // Returns true if this field tracks presence, ie. 
does the field + // distinguish between "unset" and "present with default value." + // This includes required, optional, and oneof fields. It excludes maps, + // repeated fields, and singular proto3 fields without "optional". + // + // For fields where has_presence() == true, the return value of + // Reflection::HasField() is semantically meaningful. + bool has_presence() const; + + // Index of this field within the message's field array, or the file or + // extension scope's extensions array. + int index() const; + + // Does this field have an explicitly-declared default value? + bool has_default_value() const; + + // Whether the user has specified the json_name field option in the .proto + // file. + bool has_json_name() const; + + // Get the field default value if cpp_type() == CPPTYPE_INT32. If no + // explicit default was defined, the default is 0. + int32_t default_value_int32_t() const; + int32_t default_value_int32() const + { + return default_value_int32_t(); + } + // Get the field default value if cpp_type() == CPPTYPE_INT64. If no + // explicit default was defined, the default is 0. + int64_t default_value_int64_t() const; + int64_t default_value_int64() const + { + return default_value_int64_t(); + } + // Get the field default value if cpp_type() == CPPTYPE_UINT32. If no + // explicit default was defined, the default is 0. + uint32_t default_value_uint32_t() const; + uint32_t default_value_uint32() const + { + return default_value_uint32_t(); + } + // Get the field default value if cpp_type() == CPPTYPE_UINT64. If no + // explicit default was defined, the default is 0. + uint64_t default_value_uint64_t() const; + uint64_t default_value_uint64() const + { + return default_value_uint64_t(); + } + // Get the field default value if cpp_type() == CPPTYPE_FLOAT. If no + // explicit default was defined, the default is 0.0. + float default_value_float() const; + // Get the field default value if cpp_type() == CPPTYPE_DOUBLE. 
If no + // explicit default was defined, the default is 0.0. + double default_value_double() const; + // Get the field default value if cpp_type() == CPPTYPE_BOOL. If no + // explicit default was defined, the default is false. + bool default_value_bool() const; + // Get the field default value if cpp_type() == CPPTYPE_ENUM. If no + // explicit default was defined, the default is the first value defined + // in the enum type (all enum types are required to have at least one value). + // This never returns nullptr. + const EnumValueDescriptor* default_value_enum() const; + // Get the field default value if cpp_type() == CPPTYPE_STRING. If no + // explicit default was defined, the default is the empty string. + const std::string& default_value_string() const; + + // The Descriptor for the message of which this is a field. For extensions, + // this is the extended type. Never nullptr. + const Descriptor* containing_type() const; + + // If the field is a member of a oneof, this is the one, otherwise this is + // nullptr. + const OneofDescriptor* containing_oneof() const; + + // If the field is a member of a non-synthetic oneof, returns the descriptor + // for the oneof, otherwise returns nullptr. + const OneofDescriptor* real_containing_oneof() const; + + // If the field is a member of a oneof, returns the index in that oneof. + int index_in_oneof() const; + + // An extension may be declared within the scope of another message. If this + // field is an extension (is_extension() is true), then extension_scope() + // returns that message, or nullptr if the extension was declared at global + // scope. If this is not an extension, extension_scope() is undefined (may + // assert-fail). + const Descriptor* extension_scope() const; + + // If type is TYPE_MESSAGE or TYPE_GROUP, returns a descriptor for the + // message or the group type. Otherwise, returns null. + const Descriptor* message_type() const; + // If type is TYPE_ENUM, returns a descriptor for the enum. 
Otherwise, + // returns null. + const EnumDescriptor* enum_type() const; + + // Get the FieldOptions for this field. This includes things listed in + // square brackets after the field definition. E.g., the field: + // optional string text = 1 [ctype=CORD]; + // has the "ctype" option set. Allowed options are defined by FieldOptions in + // descriptor.proto, and any available extensions of that message. + const FieldOptions& options() const; + + // See Descriptor::CopyTo(). + void CopyTo(FieldDescriptorProto* proto) const; + + // See Descriptor::DebugString(). + std::string DebugString() const; + + // See Descriptor::DebugStringWithOptions(). + std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Helper method to get the CppType for a particular Type. + static CppType TypeToCppType(Type type); + + // Helper method to get the name of a Type. + static const char* TypeName(Type type); + + // Helper method to get the name of a CppType. + static const char* CppTypeName(CppType cpp_type); + + // Return true iff [packed = true] is valid for fields of this type. + static inline bool IsTypePackable(Type field_type); + + // Returns full_name() except if the field is a MessageSet extension, + // in which case it returns the full_name() of the containing message type + // for backwards compatibility with proto1. + // + // A MessageSet extension is defined as an optional message extension + // whose containing type has the message_set_wire_format option set. + // This should be true of extensions of google.protobuf.bridge.MessageSet; + // by convention, such extensions are named "message_set_extension". + // + // The opposite operation (looking up an extension's FieldDescriptor given + // its printable name) can be accomplished with + // message->file()->pool()->FindExtensionByPrintableName(message, name) + // where the extension extends "message". 
+ const std::string& PrintableNameForExtension() const; + + // Source Location --------------------------------------------------- + + // Updates |*out_location| to the source location of the complete + // extent of this field declaration. Returns false and leaves + // |*out_location| unchanged iff location information was not available. + bool GetSourceLocation(SourceLocation* out_location) const; + + private: + friend class Symbol; + typedef FieldOptions OptionsType; + + // Allows access to GetLocationPath for annotations. + friend class io::Printer; + friend class compiler::cpp::Formatter; + friend class Reflection; + + // Fill the json_name field of FieldDescriptorProto. + void CopyJsonNameTo(FieldDescriptorProto* proto) const; + + // See Descriptor::DebugString(). + void DebugString(int depth, std::string* contents, const DebugStringOptions& options) const; + + // formats the default value appropriately and returns it as a string. + // Must have a default value to call this. If quote_string_type is true, then + // types of CPPTYPE_STRING will be surrounded by quotes and CEscaped. + std::string DefaultValueAsString(bool quote_string_type) const; + + // Helper function that returns the field type name for DebugString. + std::string FieldTypeNameDebugString() const; + + // Walks up the descriptor tree to generate the source location path + // to this descriptor from the file root. + void GetLocationPath(std::vector* output) const; + + // Returns true if this is a map message type. + bool is_map_message_type() const; + + bool has_default_value_ : 1; + bool proto3_optional_ : 1; + // Whether the user has specified the json_name field option in the .proto + // file. + bool has_json_name_ : 1; + bool is_extension_ : 1; + bool is_oneof_ : 1; + + // Actually a `Label` but stored as uint8_t to save space. + uint8_t label_ : 2; + + // Actually a `Type`, but stored as uint8_t to save space. 
+ mutable uint8_t type_; + + // Logically: + // all_names_ = [name, full_name, lower, camel, json] + // However: + // duplicates will be omitted, so lower/camel/json might be in the same + // position. + // We store the true offset for each name here, and the bit width must be + // large enough to account for the worst case where all names are present. + uint8_t lowercase_name_index_ : 2; + uint8_t camelcase_name_index_ : 2; + uint8_t json_name_index_ : 3; + // Sadly, `number_` located here to reduce padding. Unrelated to all_names_ + // and its indices above. + int number_; + const std::string* all_names_; + const FileDescriptor* file_; + + // The once_flag is followed by a NUL terminated string for the type name and + // enum default value (or empty string if no default enum). + internal::once_flag* type_once_; + static void TypeOnceInit(const FieldDescriptor* to_init); + void InternalTypeOnceInit() const; + const Descriptor* containing_type_; + union + { + const OneofDescriptor* containing_oneof; + const Descriptor* extension_scope; + } scope_; + union + { + mutable const Descriptor* message_type; + mutable const EnumDescriptor* enum_type; + } type_descriptor_; + const FieldOptions* options_; + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() in + // descriptor.cc and update them to initialize the field. 
+ + union + { + int32_t default_value_int32_t_; + int64_t default_value_int64_t_; + uint32_t default_value_uint32_t_; + uint64_t default_value_uint64_t_; + float default_value_float_; + double default_value_double_; + bool default_value_bool_; + + mutable const EnumValueDescriptor* default_value_enum_; + const std::string* default_value_string_; + mutable std::atomic default_generated_instance_; + }; + + static const CppType kTypeToCppTypeMap[MAX_TYPE + 1]; + + static const char* const kTypeToName[MAX_TYPE + 1]; + + static const char* const kCppTypeToName[MAX_CPPTYPE + 1]; + + static const char* const kLabelToName[MAX_LABEL + 1]; + + // Must be constructed using DescriptorPool. + FieldDescriptor() + { + } + friend class DescriptorBuilder; + friend class FileDescriptor; + friend class Descriptor; + friend class OneofDescriptor; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FieldDescriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(FieldDescriptor, 72); + + // Describes a oneof defined in a message type. + class PROTOBUF_EXPORT OneofDescriptor : private internal::SymbolBase + { + public: + typedef OneofDescriptorProto Proto; + + const std::string& name() const; // Name of this oneof. + const std::string& full_name() const; // Fully-qualified name of the oneof. + + // Index of this oneof within the message's oneof array. + int index() const; + + // Returns whether this oneof was inserted by the compiler to wrap a proto3 + // optional field. If this returns true, code generators should *not* emit it. + bool is_synthetic() const; + + // The .proto file in which this oneof was defined. Never nullptr. + const FileDescriptor* file() const; + // The Descriptor for the message containing this oneof. + const Descriptor* containing_type() const; + + // The number of (non-extension) fields which are members of this oneof. + int field_count() const; + // Get a member of this oneof, in the order in which they were declared in the + // .proto file. Does not include extensions. 
+ const FieldDescriptor* field(int index) const; + + const OneofOptions& options() const; + + // See Descriptor::CopyTo(). + void CopyTo(OneofDescriptorProto* proto) const; + + // See Descriptor::DebugString(). + std::string DebugString() const; + + // See Descriptor::DebugStringWithOptions(). + std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Source Location --------------------------------------------------- + + // Updates |*out_location| to the source location of the complete + // extent of this oneof declaration. Returns false and leaves + // |*out_location| unchanged iff location information was not available. + bool GetSourceLocation(SourceLocation* out_location) const; + + private: + friend class Symbol; + typedef OneofOptions OptionsType; + + // Allows access to GetLocationPath for annotations. + friend class io::Printer; + friend class compiler::cpp::Formatter; + + // See Descriptor::DebugString(). + void DebugString(int depth, std::string* contents, const DebugStringOptions& options) const; + + // Walks up the descriptor tree to generate the source location path + // to this descriptor from the file root. + void GetLocationPath(std::vector* output) const; + + int field_count_; + + // all_names_ = [name, full_name] + const std::string* all_names_; + const Descriptor* containing_type_; + const OneofOptions* options_; + const FieldDescriptor* fields_; + + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() + // in descriptor.cc and update them to initialize the field. + + // Must be constructed using DescriptorPool. + OneofDescriptor() + { + } + friend class DescriptorBuilder; + friend class Descriptor; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(OneofDescriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(OneofDescriptor, 40); + + // Describes an enum type defined in a .proto file. To get the EnumDescriptor + // for a generated enum type, call TypeName_descriptor(). 
Use DescriptorPool + // to construct your own descriptors. + class PROTOBUF_EXPORT EnumDescriptor : private internal::SymbolBase + { + public: + typedef EnumDescriptorProto Proto; + + // The name of this enum type in the containing scope. + const std::string& name() const; + + // The fully-qualified name of the enum type, scope delimited by periods. + const std::string& full_name() const; + + // Index of this enum within the file or containing message's enum array. + int index() const; + + // The .proto file in which this enum type was defined. Never nullptr. + const FileDescriptor* file() const; + + // The number of values for this EnumDescriptor. Guaranteed to be greater + // than zero. + int value_count() const; + // Gets a value by index, where 0 <= index < value_count(). + // These are returned in the order they were defined in the .proto file. + const EnumValueDescriptor* value(int index) const; + + // Looks up a value by name. Returns nullptr if no such value exists. + const EnumValueDescriptor* FindValueByName(ConstStringParam name) const; + // Looks up a value by number. Returns nullptr if no such value exists. If + // multiple values have this number, the first one defined is returned. + const EnumValueDescriptor* FindValueByNumber(int number) const; + + // If this enum type is nested in a message type, this is that message type. + // Otherwise, nullptr. + const Descriptor* containing_type() const; + + // Get options for this enum type. These are specified in the .proto file by + // placing lines like "option foo = 1234;" in the enum definition. Allowed + // options are defined by EnumOptions in descriptor.proto, and any available + // extensions of that message. + const EnumOptions& options() const; + + // See Descriptor::CopyTo(). + void CopyTo(EnumDescriptorProto* proto) const; + + // See Descriptor::DebugString(). + std::string DebugString() const; + + // See Descriptor::DebugStringWithOptions(). 
+ std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Returns true if this is a placeholder for an unknown enum. This will + // only be the case if this descriptor comes from a DescriptorPool + // with AllowUnknownDependencies() set. + bool is_placeholder() const; + + // Reserved fields ------------------------------------------------- + + // A range of reserved field numbers. + struct ReservedRange + { + int start; // inclusive + int end; // inclusive + }; + + // The number of reserved ranges in this message type. + int reserved_range_count() const; + // Gets an reserved range by index, where 0 <= index < + // reserved_range_count(). These are returned in the order they were defined + // in the .proto file. + const EnumDescriptor::ReservedRange* reserved_range(int index) const; + + // Returns true if the number is in one of the reserved ranges. + bool IsReservedNumber(int number) const; + + // Returns nullptr if no reserved range contains the given number. + const EnumDescriptor::ReservedRange* FindReservedRangeContainingNumber( + int number + ) const; + + // The number of reserved field names in this message type. + int reserved_name_count() const; + + // Gets a reserved name by index, where 0 <= index < reserved_name_count(). + const std::string& reserved_name(int index) const; + + // Returns true if the field name is reserved. + bool IsReservedName(ConstStringParam name) const; + + // Source Location --------------------------------------------------- + + // Updates |*out_location| to the source location of the complete + // extent of this enum declaration. Returns false and leaves + // |*out_location| unchanged iff location information was not available. + bool GetSourceLocation(SourceLocation* out_location) const; + + private: + friend class Symbol; + typedef EnumOptions OptionsType; + + // Allows access to GetLocationPath for annotations. 
+ friend class io::Printer; + friend class compiler::cpp::Formatter; + + // Allow access to FindValueByNumberCreatingIfUnknown. + friend class descriptor_unittest::DescriptorTest; + + // Looks up a value by number. If the value does not exist, dynamically + // creates a new EnumValueDescriptor for that value, assuming that it was + // unknown. If a new descriptor is created, this is done in a thread-safe way, + // and future calls will return the same value descriptor pointer. + // + // This is private but is used by Reflection (which is friended below) to + // return a valid EnumValueDescriptor from GetEnum() when this feature is + // enabled. + const EnumValueDescriptor* FindValueByNumberCreatingIfUnknown( + int number + ) const; + + // See Descriptor::DebugString(). + void DebugString(int depth, std::string* contents, const DebugStringOptions& options) const; + + // Walks up the descriptor tree to generate the source location path + // to this descriptor from the file root. + void GetLocationPath(std::vector* output) const; + + // True if this is a placeholder for an unknown type. + bool is_placeholder_ : 1; + // True if this is a placeholder and the type name wasn't fully-qualified. + bool is_unqualified_placeholder_ : 1; + + // This points to the last value _index_ that is part of the sequence starting + // with the first label, where + // `enum->value(i)->number() == enum->value(0)->number() + i` + // We measure relative to the first label to adapt to enum labels starting at + // 0 or 1. + // Uses 16-bit to avoid extra padding. Unlikely to have more than 2^15 + // sequentially numbered labels in an enum. 
+ int16_t sequential_value_limit_; + + int value_count_; + + // all_names_ = [name, full_name] + const std::string* all_names_; + const FileDescriptor* file_; + const Descriptor* containing_type_; + const EnumOptions* options_; + EnumValueDescriptor* values_; + + int reserved_range_count_; + int reserved_name_count_; + EnumDescriptor::ReservedRange* reserved_ranges_; + const std::string** reserved_names_; + + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() in + // descriptor.cc and update them to initialize the field. + + // Must be constructed using DescriptorPool. + EnumDescriptor() + { + } + friend class DescriptorBuilder; + friend class Descriptor; + friend class FieldDescriptor; + friend class FileDescriptorTables; + friend class EnumValueDescriptor; + friend class FileDescriptor; + friend class DescriptorPool; + friend class Reflection; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(EnumDescriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(EnumDescriptor, 72); + + // Describes an individual enum constant of a particular type. To get the + // EnumValueDescriptor for a given enum value, first get the EnumDescriptor + // for its type, then use EnumDescriptor::FindValueByName() or + // EnumDescriptor::FindValueByNumber(). Use DescriptorPool to construct + // your own descriptors. + class PROTOBUF_EXPORT EnumValueDescriptor : private internal::SymbolBaseN<0>, private internal::SymbolBaseN<1> + { + public: + typedef EnumValueDescriptorProto Proto; + + const std::string& name() const; // Name of this enum constant. + int index() const; // Index within the enums's Descriptor. + int number() const; // Numeric value of this enum constant. + + // The full_name of an enum value is a sibling symbol of the enum type. + // e.g. the full name of FieldDescriptorProto::TYPE_INT32 is actually + // "google.protobuf.FieldDescriptorProto.TYPE_INT32", NOT + // "google.protobuf.FieldDescriptorProto.Type.TYPE_INT32". 
This is to conform + // with C++ scoping rules for enums. + const std::string& full_name() const; + + // The .proto file in which this value was defined. Never nullptr. + const FileDescriptor* file() const; + // The type of this value. Never nullptr. + const EnumDescriptor* type() const; + + // Get options for this enum value. These are specified in the .proto file by + // adding text like "[foo = 1234]" after an enum value definition. Allowed + // options are defined by EnumValueOptions in descriptor.proto, and any + // available extensions of that message. + const EnumValueOptions& options() const; + + // See Descriptor::CopyTo(). + void CopyTo(EnumValueDescriptorProto* proto) const; + + // See Descriptor::DebugString(). + std::string DebugString() const; + + // See Descriptor::DebugStringWithOptions(). + std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Source Location --------------------------------------------------- + + // Updates |*out_location| to the source location of the complete + // extent of this enum value declaration. Returns false and leaves + // |*out_location| unchanged iff location information was not available. + bool GetSourceLocation(SourceLocation* out_location) const; + + private: + friend class Symbol; + typedef EnumValueOptions OptionsType; + + // Allows access to GetLocationPath for annotations. + friend class io::Printer; + friend class compiler::cpp::Formatter; + + // See Descriptor::DebugString(). + void DebugString(int depth, std::string* contents, const DebugStringOptions& options) const; + + // Walks up the descriptor tree to generate the source location path + // to this descriptor from the file root. 
+ void GetLocationPath(std::vector* output) const; + + int number_; + // all_names_ = [name, full_name] + const std::string* all_names_; + const EnumDescriptor* type_; + const EnumValueOptions* options_; + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() + // in descriptor.cc and update them to initialize the field. + + // Must be constructed using DescriptorPool. + EnumValueDescriptor() + { + } + friend class DescriptorBuilder; + friend class EnumDescriptor; + friend class DescriptorPool; + friend class FileDescriptorTables; + friend class Reflection; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(EnumValueDescriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(EnumValueDescriptor, 32); + + // Describes an RPC service. Use DescriptorPool to construct your own + // descriptors. + class PROTOBUF_EXPORT ServiceDescriptor : private internal::SymbolBase + { + public: + typedef ServiceDescriptorProto Proto; + + // The name of the service, not including its containing scope. + const std::string& name() const; + // The fully-qualified name of the service, scope delimited by periods. + const std::string& full_name() const; + // Index of this service within the file's services array. + int index() const; + + // The .proto file in which this service was defined. Never nullptr. + const FileDescriptor* file() const; + + // Get options for this service type. These are specified in the .proto file + // by placing lines like "option foo = 1234;" in the service definition. + // Allowed options are defined by ServiceOptions in descriptor.proto, and any + // available extensions of that message. + const ServiceOptions& options() const; + + // The number of methods this service defines. + int method_count() const; + // Gets a MethodDescriptor by index, where 0 <= index < method_count(). + // These are returned in the order they were defined in the .proto file. 
+ const MethodDescriptor* method(int index) const; + + // Look up a MethodDescriptor by name. + const MethodDescriptor* FindMethodByName(ConstStringParam name) const; + + // See Descriptor::CopyTo(). + void CopyTo(ServiceDescriptorProto* proto) const; + + // See Descriptor::DebugString(). + std::string DebugString() const; + + // See Descriptor::DebugStringWithOptions(). + std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Source Location --------------------------------------------------- + + // Updates |*out_location| to the source location of the complete + // extent of this service declaration. Returns false and leaves + // |*out_location| unchanged iff location information was not available. + bool GetSourceLocation(SourceLocation* out_location) const; + + private: + friend class Symbol; + typedef ServiceOptions OptionsType; + + // Allows access to GetLocationPath for annotations. + friend class io::Printer; + friend class compiler::cpp::Formatter; + + // See Descriptor::DebugString(). + void DebugString(std::string* contents, const DebugStringOptions& options) const; + + // Walks up the descriptor tree to generate the source location path + // to this descriptor from the file root. + void GetLocationPath(std::vector* output) const; + + // all_names_ = [name, full_name] + const std::string* all_names_; + const FileDescriptor* file_; + const ServiceOptions* options_; + MethodDescriptor* methods_; + int method_count_; + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() in + // descriptor.cc and update them to initialize the field. + + // Must be constructed using DescriptorPool. 
+ ServiceDescriptor() + { + } + friend class DescriptorBuilder; + friend class FileDescriptor; + friend class MethodDescriptor; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ServiceDescriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(ServiceDescriptor, 48); + + // Describes an individual service method. To obtain a MethodDescriptor given + // a service, first get its ServiceDescriptor, then call + // ServiceDescriptor::FindMethodByName(). Use DescriptorPool to construct your + // own descriptors. + class PROTOBUF_EXPORT MethodDescriptor : private internal::SymbolBase + { + public: + typedef MethodDescriptorProto Proto; + + // Name of this method, not including containing scope. + const std::string& name() const; + // The fully-qualified name of the method, scope delimited by periods. + const std::string& full_name() const; + // Index within the service's Descriptor. + int index() const; + + // The .proto file in which this method was defined. Never nullptr. + const FileDescriptor* file() const; + // Gets the service to which this method belongs. Never nullptr. + const ServiceDescriptor* service() const; + + // Gets the type of protocol message which this method accepts as input. + const Descriptor* input_type() const; + // Gets the type of protocol message which this message produces as output. + const Descriptor* output_type() const; + + // Gets whether the client streams multiple requests. + bool client_streaming() const; + // Gets whether the server streams multiple responses. + bool server_streaming() const; + + // Get options for this method. These are specified in the .proto file by + // placing lines like "option foo = 1234;" in curly-braces after a method + // declaration. Allowed options are defined by MethodOptions in + // descriptor.proto, and any available extensions of that message. + const MethodOptions& options() const; + + // See Descriptor::CopyTo(). + void CopyTo(MethodDescriptorProto* proto) const; + + // See Descriptor::DebugString(). 
+ std::string DebugString() const; + + // See Descriptor::DebugStringWithOptions(). + std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Source Location --------------------------------------------------- + + // Updates |*out_location| to the source location of the complete + // extent of this method declaration. Returns false and leaves + // |*out_location| unchanged iff location information was not available. + bool GetSourceLocation(SourceLocation* out_location) const; + + private: + friend class Symbol; + typedef MethodOptions OptionsType; + + // Allows access to GetLocationPath for annotations. + friend class io::Printer; + friend class compiler::cpp::Formatter; + + // See Descriptor::DebugString(). + void DebugString(int depth, std::string* contents, const DebugStringOptions& options) const; + + // Walks up the descriptor tree to generate the source location path + // to this descriptor from the file root. + void GetLocationPath(std::vector* output) const; + + bool client_streaming_; + bool server_streaming_; + // all_names_ = [name, full_name] + const std::string* all_names_; + const ServiceDescriptor* service_; + mutable internal::LazyDescriptor input_type_; + mutable internal::LazyDescriptor output_type_; + const MethodOptions* options_; + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() in + // descriptor.cc and update them to initialize the field. + + // Must be constructed using DescriptorPool. + MethodDescriptor() + { + } + friend class DescriptorBuilder; + friend class ServiceDescriptor; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MethodDescriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(MethodDescriptor, 64); + + // Describes a whole .proto file. To get the FileDescriptor for a compiled-in + // file, get the descriptor for something defined in that file and call + // descriptor->file(). Use DescriptorPool to construct your own descriptors. 
+ class PROTOBUF_EXPORT FileDescriptor : private internal::SymbolBase + { + public: + typedef FileDescriptorProto Proto; + + // The filename, relative to the source tree. + // e.g. "foo/bar/baz.proto" + const std::string& name() const; + + // The package, e.g. "google.protobuf.compiler". + const std::string& package() const; + + // The DescriptorPool in which this FileDescriptor and all its contents were + // allocated. Never nullptr. + const DescriptorPool* pool() const; + + // The number of files imported by this one. + int dependency_count() const; + // Gets an imported file by index, where 0 <= index < dependency_count(). + // These are returned in the order they were defined in the .proto file. + const FileDescriptor* dependency(int index) const; + + // The number of files public imported by this one. + // The public dependency list is a subset of the dependency list. + int public_dependency_count() const; + // Gets a public imported file by index, where 0 <= index < + // public_dependency_count(). + // These are returned in the order they were defined in the .proto file. + const FileDescriptor* public_dependency(int index) const; + + // The number of files that are imported for weak fields. + // The weak dependency list is a subset of the dependency list. + int weak_dependency_count() const; + // Gets a weak imported file by index, where 0 <= index < + // weak_dependency_count(). + // These are returned in the order they were defined in the .proto file. + const FileDescriptor* weak_dependency(int index) const; + + // Number of top-level message types defined in this file. (This does not + // include nested types.) + int message_type_count() const; + // Gets a top-level message type, where 0 <= index < message_type_count(). + // These are returned in the order they were defined in the .proto file. + const Descriptor* message_type(int index) const; + + // Number of top-level enum types defined in this file. (This does not + // include nested types.) 
+ int enum_type_count() const; + // Gets a top-level enum type, where 0 <= index < enum_type_count(). + // These are returned in the order they were defined in the .proto file. + const EnumDescriptor* enum_type(int index) const; + + // Number of services defined in this file. + int service_count() const; + // Gets a service, where 0 <= index < service_count(). + // These are returned in the order they were defined in the .proto file. + const ServiceDescriptor* service(int index) const; + + // Number of extensions defined at file scope. (This does not include + // extensions nested within message types.) + int extension_count() const; + // Gets an extension's descriptor, where 0 <= index < extension_count(). + // These are returned in the order they were defined in the .proto file. + const FieldDescriptor* extension(int index) const; + + // Get options for this file. These are specified in the .proto file by + // placing lines like "option foo = 1234;" at the top level, outside of any + // other definitions. Allowed options are defined by FileOptions in + // descriptor.proto, and any available extensions of that message. + const FileOptions& options() const; + + // Syntax of this file. + enum Syntax + { + SYNTAX_UNKNOWN = 0, + SYNTAX_PROTO2 = 2, + SYNTAX_PROTO3 = 3, + }; + Syntax syntax() const; + static const char* SyntaxName(Syntax syntax); + + // Find a top-level message type by name (not full_name). Returns nullptr if + // not found. + const Descriptor* FindMessageTypeByName(ConstStringParam name) const; + // Find a top-level enum type by name. Returns nullptr if not found. + const EnumDescriptor* FindEnumTypeByName(ConstStringParam name) const; + // Find an enum value defined in any top-level enum by name. Returns nullptr + // if not found. + const EnumValueDescriptor* FindEnumValueByName(ConstStringParam name) const; + // Find a service definition by name. Returns nullptr if not found. 
+ const ServiceDescriptor* FindServiceByName(ConstStringParam name) const; + // Find a top-level extension definition by name. Returns nullptr if not + // found. + const FieldDescriptor* FindExtensionByName(ConstStringParam name) const; + // Similar to FindExtensionByName(), but searches by lowercased-name. See + // Descriptor::FindFieldByLowercaseName(). + const FieldDescriptor* FindExtensionByLowercaseName( + ConstStringParam name + ) const; + // Similar to FindExtensionByName(), but searches by camelcased-name. See + // Descriptor::FindFieldByCamelcaseName(). + const FieldDescriptor* FindExtensionByCamelcaseName( + ConstStringParam name + ) const; + + // See Descriptor::CopyTo(). + // Notes: + // - This method does NOT copy source code information since it is relatively + // large and rarely needed. See CopySourceCodeInfoTo() below. + void CopyTo(FileDescriptorProto* proto) const; + // Write the source code information of this FileDescriptor into the given + // FileDescriptorProto. See CopyTo() above. + void CopySourceCodeInfoTo(FileDescriptorProto* proto) const; + // Fill the json_name field of FieldDescriptorProto for all fields. Can only + // be called after CopyTo(). + void CopyJsonNameTo(FileDescriptorProto* proto) const; + + // See Descriptor::DebugString(). + std::string DebugString() const; + + // See Descriptor::DebugStringWithOptions(). + std::string DebugStringWithOptions(const DebugStringOptions& options) const; + + // Returns true if this is a placeholder for an unknown file. This will + // only be the case if this descriptor comes from a DescriptorPool + // with AllowUnknownDependencies() set. + bool is_placeholder() const; + + // Updates |*out_location| to the source location of the complete extent of + // this file declaration (namely, the empty path). 
+ bool GetSourceLocation(SourceLocation* out_location) const; + + // Updates |*out_location| to the source location of the complete + // extent of the declaration or declaration-part denoted by |path|. + // Returns false and leaves |*out_location| unchanged iff location + // information was not available. (See SourceCodeInfo for + // description of path encoding.) + bool GetSourceLocation(const std::vector& path, SourceLocation* out_location) const; + + private: + friend class Symbol; + typedef FileOptions OptionsType; + + bool is_placeholder_; + // Indicates the FileDescriptor is completed building. Used to verify + // that type accessor functions that can possibly build a dependent file + // aren't called during the process of building the file. + bool finished_building_; + // Actually a `Syntax` but stored as uint8_t to save space. + uint8_t syntax_; + // This one is here to fill the padding. + int extension_count_; + + const std::string* name_; + const std::string* package_; + const DescriptorPool* pool_; + + // dependencies_once_ contain a once_flag followed by N NUL terminated + // strings. Dependencies that do not need to be loaded will be empty. ie just + // {'\0'} + internal::once_flag* dependencies_once_; + static void DependenciesOnceInit(const FileDescriptor* to_init); + void InternalDependenciesOnceInit() const; + + // These are arranged to minimize padding on 64-bit. 
+ int dependency_count_; + int public_dependency_count_; + int weak_dependency_count_; + int message_type_count_; + int enum_type_count_; + int service_count_; + + mutable const FileDescriptor** dependencies_; + int* public_dependencies_; + int* weak_dependencies_; + Descriptor* message_types_; + EnumDescriptor* enum_types_; + ServiceDescriptor* services_; + FieldDescriptor* extensions_; + const FileOptions* options_; + + const FileDescriptorTables* tables_; + const SourceCodeInfo* source_code_info_; + + // IMPORTANT: If you add a new field, make sure to search for all instances + // of Allocate() and AllocateArray() in + // descriptor.cc and update them to initialize the field. + + FileDescriptor() + { + } + friend class DescriptorBuilder; + friend class DescriptorPool; + friend class Descriptor; + friend class FieldDescriptor; + friend class internal::LazyDescriptor; + friend class OneofDescriptor; + friend class EnumDescriptor; + friend class EnumValueDescriptor; + friend class MethodDescriptor; + friend class ServiceDescriptor; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FileDescriptor); + }; + + PROTOBUF_INTERNAL_CHECK_CLASS_SIZE(FileDescriptor, 144); + + // =================================================================== + + // Used to construct descriptors. + // + // Normally you won't want to build your own descriptors. Message classes + // constructed by the protocol compiler will provide them for you. However, + // if you are implementing Message on your own, or if you are writing a + // program which can operate on totally arbitrary types and needs to load + // them from some sort of database, you might need to. + // + // Since Descriptors are composed of a whole lot of cross-linked bits of + // data that would be a pain to put together manually, the + // DescriptorPool class is provided to make the process easier. 
It can + // take a FileDescriptorProto (defined in descriptor.proto), validate it, + // and convert it to a set of nicely cross-linked Descriptors. + // + // DescriptorPool also helps with memory management. Descriptors are + // composed of many objects containing static data and pointers to each + // other. In all likelihood, when it comes time to delete this data, + // you'll want to delete it all at once. In fact, it is not uncommon to + // have a whole pool of descriptors all cross-linked with each other which + // you wish to delete all at once. This class represents such a pool, and + // handles the memory management for you. + // + // You can also search for descriptors within a DescriptorPool by name, and + // extensions by number. + class PROTOBUF_EXPORT DescriptorPool + { + public: + // Create a normal, empty DescriptorPool. + DescriptorPool(); + + // Constructs a DescriptorPool that, when it can't find something among the + // descriptors already in the pool, looks for it in the given + // DescriptorDatabase. + // Notes: + // - If a DescriptorPool is constructed this way, its BuildFile*() methods + // must not be called (they will assert-fail). The only way to populate + // the pool with descriptors is to call the Find*By*() methods. + // - The Find*By*() methods may block the calling thread if the + // DescriptorDatabase blocks. This in turn means that parsing messages + // may block if they need to look up extensions. + // - The Find*By*() methods will use mutexes for thread-safety, thus making + // them slower even when they don't have to fall back to the database. + // In fact, even the Find*By*() methods of descriptor objects owned by + // this pool will be slower, since they will have to obtain locks too. + // - An ErrorCollector may optionally be given to collect validation errors + // in files loaded from the database. If not given, errors will be printed + // to GOOGLE_LOG(ERROR). 
Remember that files are built on-demand, so this + // ErrorCollector may be called from any thread that calls one of the + // Find*By*() methods. + // - The DescriptorDatabase must not be mutated during the lifetime of + // the DescriptorPool. Even if the client takes care to avoid data races, + // changes to the content of the DescriptorDatabase may not be reflected + // in subsequent lookups in the DescriptorPool. + class ErrorCollector; + explicit DescriptorPool(DescriptorDatabase* fallback_database, ErrorCollector* error_collector = nullptr); + + ~DescriptorPool(); + + // Get a pointer to the generated pool. Generated protocol message classes + // which are compiled into the binary will allocate their descriptors in + // this pool. Do not add your own descriptors to this pool. + static const DescriptorPool* generated_pool(); + + // Find a FileDescriptor in the pool by file name. Returns nullptr if not + // found. + const FileDescriptor* FindFileByName(ConstStringParam name) const; + + // Find the FileDescriptor in the pool which defines the given symbol. + // If any of the Find*ByName() methods below would succeed, then this is + // equivalent to calling that method and calling the result's file() method. + // Otherwise this returns nullptr. + const FileDescriptor* FindFileContainingSymbol( + ConstStringParam symbol_name + ) const; + + // Looking up descriptors ------------------------------------------ + // These find descriptors by fully-qualified name. These will find both + // top-level descriptors and nested descriptors. They return nullptr if not + // found. 
+ + const Descriptor* FindMessageTypeByName(ConstStringParam name) const; + const FieldDescriptor* FindFieldByName(ConstStringParam name) const; + const FieldDescriptor* FindExtensionByName(ConstStringParam name) const; + const OneofDescriptor* FindOneofByName(ConstStringParam name) const; + const EnumDescriptor* FindEnumTypeByName(ConstStringParam name) const; + const EnumValueDescriptor* FindEnumValueByName(ConstStringParam name) const; + const ServiceDescriptor* FindServiceByName(ConstStringParam name) const; + const MethodDescriptor* FindMethodByName(ConstStringParam name) const; + + // Finds an extension of the given type by number. The extendee must be + // a member of this DescriptorPool or one of its underlays. + const FieldDescriptor* FindExtensionByNumber(const Descriptor* extendee, int number) const; + + // Finds an extension of the given type by its printable name. + // See comments above PrintableNameForExtension() for the definition of + // "printable name". The extendee must be a member of this DescriptorPool + // or one of its underlays. Returns nullptr if there is no known message + // extension with the given printable name. + const FieldDescriptor* FindExtensionByPrintableName( + const Descriptor* extendee, ConstStringParam printable_name + ) const; + + // Finds extensions of extendee. The extensions will be appended to + // out in an undefined order. Only extensions defined directly in + // this DescriptorPool or one of its underlays are guaranteed to be + // found: extensions defined in the fallback database might not be found + // depending on the database implementation. + void FindAllExtensions(const Descriptor* extendee, std::vector* out) const; + + // Building descriptors -------------------------------------------- + + // When converting a FileDescriptorProto to a FileDescriptor, various + // errors might be detected in the input. The caller may handle these + // programmatically by implementing an ErrorCollector. 
+ class PROTOBUF_EXPORT ErrorCollector + { + public: + inline ErrorCollector() + { + } + virtual ~ErrorCollector(); + + // These constants specify what exact part of the construct is broken. + // This is useful e.g. for mapping the error back to an exact location + // in a .proto file. + enum ErrorLocation + { + NAME, // the symbol name, or the package name for files + NUMBER, // field or extension range number + TYPE, // field type + EXTENDEE, // field extendee + DEFAULT_VALUE, // field default value + INPUT_TYPE, // method input type + OUTPUT_TYPE, // method output type + OPTION_NAME, // name in assignment + OPTION_VALUE, // value in option assignment + IMPORT, // import error + OTHER // some other problem + }; + + // Reports an error in the FileDescriptorProto. Use this function if the + // problem occurred should interrupt building the FileDescriptorProto. + virtual void AddError( + const std::string& filename, // File name in which the error occurred. + const std::string& element_name, // Full name of the erroneous element. + const Message* descriptor, // Descriptor of the erroneous element. + ErrorLocation location, // One of the location constants, above. + const std::string& message // Human-readable error message. + ) = 0; + + // Reports a warning in the FileDescriptorProto. Use this function if the + // problem occurred should NOT interrupt building the FileDescriptorProto. + virtual void AddWarning( + const std::string& /*filename*/, // File name in which the error + // occurred. + const std::string& /*element_name*/, // Full name of the erroneous + // element. + const Message* /*descriptor*/, // Descriptor of the erroneous element. + ErrorLocation /*location*/, // One of the location constants, above. + const std::string& /*message*/ // Human-readable error message. + ) + { + } + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ErrorCollector); + }; + + // Convert the FileDescriptorProto to real descriptors and place them in + // this DescriptorPool. 
All dependencies of the file must already be in + // the pool. Returns the resulting FileDescriptor, or nullptr if there were + // problems with the input (e.g. the message was invalid, or dependencies + // were missing). Details about the errors are written to GOOGLE_LOG(ERROR). + const FileDescriptor* BuildFile(const FileDescriptorProto& proto); + + // Same as BuildFile() except errors are sent to the given ErrorCollector. + const FileDescriptor* BuildFileCollectingErrors( + const FileDescriptorProto& proto, ErrorCollector* error_collector + ); + + // By default, it is an error if a FileDescriptorProto contains references + // to types or other files that are not found in the DescriptorPool (or its + // backing DescriptorDatabase, if any). If you call + // AllowUnknownDependencies(), however, then unknown types and files + // will be replaced by placeholder descriptors (which can be identified by + // the is_placeholder() method). This can allow you to + // perform some useful operations with a .proto file even if you do not + // have access to other .proto files on which it depends. However, some + // heuristics must be used to fill in the gaps in information, and these + // can lead to descriptors which are inaccurate. For example, the + // DescriptorPool may be forced to guess whether an unknown type is a message + // or an enum, as well as what package it resides in. Furthermore, + // placeholder types will not be discoverable via FindMessageTypeByName() + // and similar methods, which could confuse some descriptor-based algorithms. + // Generally, the results of this option should be handled with extreme care. + void AllowUnknownDependencies() + { + allow_unknown_ = true; + } + + // By default, weak imports are allowed to be missing, in which case we will + // use a placeholder for the dependency and convert the field to be an Empty + // message field. 
If you call EnforceWeakDependencies(true), however, the + // DescriptorPool will report a import not found error. + void EnforceWeakDependencies(bool enforce) + { + enforce_weak_ = enforce; + } + + // Internal stuff -------------------------------------------------- + // These methods MUST NOT be called from outside the proto2 library. + // These methods may contain hidden pitfalls and may be removed in a + // future library version. + + // Create a DescriptorPool which is overlaid on top of some other pool. + // If you search for a descriptor in the overlay and it is not found, the + // underlay will be searched as a backup. If the underlay has its own + // underlay, that will be searched next, and so on. This also means that + // files built in the overlay will be cross-linked with the underlay's + // descriptors if necessary. The underlay remains property of the caller; + // it must remain valid for the lifetime of the newly-constructed pool. + // + // Example: Say you want to parse a .proto file at runtime in order to use + // its type with a DynamicMessage. Say this .proto file has dependencies, + // but you know that all the dependencies will be things that are already + // compiled into the binary. For ease of use, you'd like to load the types + // right out of generated_pool() rather than have to parse redundant copies + // of all these .protos and runtime. But, you don't want to add the parsed + // types directly into generated_pool(): this is not allowed, and would be + // bad design anyway. So, instead, you could use generated_pool() as an + // underlay for a new DescriptorPool in which you add only the new file. + // + // WARNING: Use of underlays can lead to many subtle gotchas. Instead, + // try to formulate what you want to do in terms of DescriptorDatabases. + explicit DescriptorPool(const DescriptorPool* underlay); + + // Called by generated classes at init time to add their descriptors to + // generated_pool. Do NOT call this in your own code! 
filename must be a + // permanent string (e.g. a string literal). + static void InternalAddGeneratedFile(const void* encoded_file_descriptor, int size); + + // Disallow [enforce_utf8 = false] in .proto files. + void DisallowEnforceUtf8() + { + disallow_enforce_utf8_ = true; + } + + // For internal use only: Gets a non-const pointer to the generated pool. + // This is called at static-initialization time only, so thread-safety is + // not a concern. If both an underlay and a fallback database are present, + // the underlay takes precedence. + static DescriptorPool* internal_generated_pool(); + + // For internal use only: Gets a non-const pointer to the generated + // descriptor database. + // Only used for testing. + static DescriptorDatabase* internal_generated_database(); + + // For internal use only: Changes the behavior of BuildFile() such that it + // allows the file to make reference to message types declared in other files + // which it did not officially declare as dependencies. + void InternalDontEnforceDependencies(); + + // For internal use only: Enables lazy building of dependencies of a file. + // Delay the building of dependencies of a file descriptor until absolutely + // necessary, like when message_type() is called on a field that is defined + // in that dependency's file. This will cause functional issues if a proto + // or one of its dependencies has errors. Should only be enabled for the + // generated_pool_ (because no descriptor build errors are guaranteed by + // the compilation generation process), testing, or if a lack of descriptor + // build errors can be guaranteed for a pool. + void InternalSetLazilyBuildDependencies() + { + lazily_build_dependencies_ = true; + // This needs to be set when lazily building dependencies, as it breaks + // dependency checking. + InternalDontEnforceDependencies(); + } + + // For internal use only. 
+ void internal_set_underlay(const DescriptorPool* underlay) + { + underlay_ = underlay; + } + + // For internal (unit test) use only: Returns true if a FileDescriptor has + // been constructed for the given file, false otherwise. Useful for testing + // lazy descriptor initialization behavior. + bool InternalIsFileLoaded(ConstStringParam filename) const; + + // Add a file to unused_import_track_files_. DescriptorBuilder will log + // warnings or errors for those files if there is any unused import. + void AddUnusedImportTrackFile(ConstStringParam file_name, bool is_error = false); + void ClearUnusedImportTrackFiles(); + + private: + friend class Descriptor; + friend class internal::LazyDescriptor; + friend class FieldDescriptor; + friend class EnumDescriptor; + friend class ServiceDescriptor; + friend class MethodDescriptor; + friend class FileDescriptor; + friend class DescriptorBuilder; + friend class FileDescriptorTables; + + // Return true if the given name is a sub-symbol of any non-package + // descriptor that already exists in the descriptor pool. (The full + // definition of such types is already known.) + bool IsSubSymbolOfBuiltType(StringPiece name) const; + + // Tries to find something in the fallback database and link in the + // corresponding proto file. Returns true if successful, in which case + // the caller should search for the thing again. These are declared + // const because they are called by (semantically) const methods. + bool TryFindFileInFallbackDatabase(StringPiece name) const; + bool TryFindSymbolInFallbackDatabase(StringPiece name) const; + bool TryFindExtensionInFallbackDatabase(const Descriptor* containing_type, int field_number) const; + + // This internal find extension method only check with its table and underlay + // descriptor_pool's table. It does not check with fallback DB and no + // additional proto file will be build in this method. 
+ const FieldDescriptor* InternalFindExtensionByNumberNoLock( + const Descriptor* extendee, int number + ) const; + + // Like BuildFile() but called internally when the file has been loaded from + // fallback_database_. Declared const because it is called by (semantically) + // const methods. + const FileDescriptor* BuildFileFromDatabase( + const FileDescriptorProto& proto + ) const; + + // Helper for when lazily_build_dependencies_ is set, can look up a symbol + // after the file's descriptor is built, and can build the file where that + // symbol is defined if necessary. Will create a placeholder if the type + // doesn't exist in the fallback database, or the file doesn't build + // successfully. + Symbol CrossLinkOnDemandHelper(StringPiece name, bool expecting_enum) const; + + // Create a placeholder FileDescriptor of the specified name + FileDescriptor* NewPlaceholderFile(StringPiece name) const; + FileDescriptor* NewPlaceholderFileWithMutexHeld( + StringPiece name, internal::FlatAllocator& alloc + ) const; + + enum PlaceholderType + { + PLACEHOLDER_MESSAGE, + PLACEHOLDER_ENUM, + PLACEHOLDER_EXTENDABLE_MESSAGE + }; + // Create a placeholder Descriptor of the specified name + Symbol NewPlaceholder(StringPiece name, PlaceholderType placeholder_type) const; + Symbol NewPlaceholderWithMutexHeld(StringPiece name, PlaceholderType placeholder_type) const; + + // If fallback_database_ is nullptr, this is nullptr. Otherwise, this is a + // mutex which must be locked while accessing tables_. + internal::WrappedMutex* mutex_; + + // See constructor. + DescriptorDatabase* fallback_database_; + ErrorCollector* default_error_collector_; + const DescriptorPool* underlay_; + + // This class contains a lot of hash maps with complicated types that + // we'd like to keep out of the header. 
+ class Tables; + std::unique_ptr tables_; + + bool enforce_dependencies_; + bool lazily_build_dependencies_; + bool allow_unknown_; + bool enforce_weak_; + bool disallow_enforce_utf8_; + + // Set of files to track for unused imports. The bool value when true means + // unused imports are treated as errors (and as warnings when false). + std::map unused_import_track_files_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DescriptorPool); + }; + +// inline methods ==================================================== + +// These macros makes this repetitive code more readable. +#define PROTOBUF_DEFINE_ACCESSOR(CLASS, FIELD, TYPE) \ + inline TYPE CLASS::FIELD() const \ + { \ + return FIELD##_; \ + } + +// Strings fields are stored as pointers but returned as const references. +#define PROTOBUF_DEFINE_STRING_ACCESSOR(CLASS, FIELD) \ + inline const std::string& CLASS::FIELD() const \ + { \ + return *FIELD##_; \ + } + +// Name and full name are stored in a single array to save space. +#define PROTOBUF_DEFINE_NAME_ACCESSOR(CLASS) \ + inline const std::string& CLASS::name() const \ + { \ + return all_names_[0]; \ + } \ + inline const std::string& CLASS::full_name() const \ + { \ + return all_names_[1]; \ + } + +// Arrays take an index parameter, obviously. 
+#define PROTOBUF_DEFINE_ARRAY_ACCESSOR(CLASS, FIELD, TYPE) \ + inline TYPE CLASS::FIELD(int index) const \ + { \ + return FIELD##s_ + index; \ + } + +#define PROTOBUF_DEFINE_OPTIONS_ACCESSOR(CLASS, TYPE) \ + inline const TYPE& CLASS::options() const \ + { \ + return *options_; \ + } + + PROTOBUF_DEFINE_NAME_ACCESSOR(Descriptor) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, file, const FileDescriptor*) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, containing_type, const Descriptor*) + + PROTOBUF_DEFINE_ACCESSOR(Descriptor, field_count, int) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, oneof_decl_count, int) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, real_oneof_decl_count, int) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, nested_type_count, int) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, enum_type_count, int) + + PROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, field, const FieldDescriptor*) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, oneof_decl, const OneofDescriptor*) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, nested_type, const Descriptor*) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, enum_type, const EnumDescriptor*) + + PROTOBUF_DEFINE_ACCESSOR(Descriptor, extension_range_count, int) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, extension_count, int) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, extension_range, const Descriptor::ExtensionRange*) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, extension, const FieldDescriptor*) + + PROTOBUF_DEFINE_ACCESSOR(Descriptor, reserved_range_count, int) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, reserved_range, const Descriptor::ReservedRange*) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, reserved_name_count, int) + + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(Descriptor, MessageOptions) + PROTOBUF_DEFINE_ACCESSOR(Descriptor, is_placeholder, bool) + + PROTOBUF_DEFINE_NAME_ACCESSOR(FieldDescriptor) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, file, const FileDescriptor*) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, number, int) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, 
is_extension, bool) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, containing_type, const Descriptor*) + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(FieldDescriptor, FieldOptions) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, has_default_value, bool) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, has_json_name, bool) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_int32_t, int32_t) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_int64_t, int64_t) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_uint32_t, uint32_t) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_uint64_t, uint64_t) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_float, float) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_double, double) + PROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_bool, bool) + PROTOBUF_DEFINE_STRING_ACCESSOR(FieldDescriptor, default_value_string) + + PROTOBUF_DEFINE_NAME_ACCESSOR(OneofDescriptor) + PROTOBUF_DEFINE_ACCESSOR(OneofDescriptor, containing_type, const Descriptor*) + PROTOBUF_DEFINE_ACCESSOR(OneofDescriptor, field_count, int) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(OneofDescriptor, field, const FieldDescriptor*) + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(OneofDescriptor, OneofOptions) + + PROTOBUF_DEFINE_NAME_ACCESSOR(EnumDescriptor) + PROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, file, const FileDescriptor*) + PROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, containing_type, const Descriptor*) + PROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, value_count, int) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(EnumDescriptor, value, const EnumValueDescriptor*) + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(EnumDescriptor, EnumOptions) + PROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, is_placeholder, bool) + PROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, reserved_range_count, int) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(EnumDescriptor, reserved_range, const EnumDescriptor::ReservedRange*) + PROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, reserved_name_count, int) + + 
PROTOBUF_DEFINE_NAME_ACCESSOR(EnumValueDescriptor) + PROTOBUF_DEFINE_ACCESSOR(EnumValueDescriptor, number, int) + PROTOBUF_DEFINE_ACCESSOR(EnumValueDescriptor, type, const EnumDescriptor*) + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(EnumValueDescriptor, EnumValueOptions) + + PROTOBUF_DEFINE_NAME_ACCESSOR(ServiceDescriptor) + PROTOBUF_DEFINE_ACCESSOR(ServiceDescriptor, file, const FileDescriptor*) + PROTOBUF_DEFINE_ACCESSOR(ServiceDescriptor, method_count, int) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(ServiceDescriptor, method, const MethodDescriptor*) + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(ServiceDescriptor, ServiceOptions) + + PROTOBUF_DEFINE_NAME_ACCESSOR(MethodDescriptor) + PROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, service, const ServiceDescriptor*) + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(MethodDescriptor, MethodOptions) + PROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, client_streaming, bool) + PROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, server_streaming, bool) + + PROTOBUF_DEFINE_STRING_ACCESSOR(FileDescriptor, name) + PROTOBUF_DEFINE_STRING_ACCESSOR(FileDescriptor, package) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, pool, const DescriptorPool*) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, dependency_count, int) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, public_dependency_count, int) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, weak_dependency_count, int) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, message_type_count, int) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, enum_type_count, int) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, service_count, int) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, extension_count, int) + PROTOBUF_DEFINE_OPTIONS_ACCESSOR(FileDescriptor, FileOptions) + PROTOBUF_DEFINE_ACCESSOR(FileDescriptor, is_placeholder, bool) + + PROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, message_type, const Descriptor*) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, enum_type, const EnumDescriptor*) + PROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, service, const ServiceDescriptor*) + 
PROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, extension, const FieldDescriptor*) + +#undef PROTOBUF_DEFINE_ACCESSOR +#undef PROTOBUF_DEFINE_STRING_ACCESSOR +#undef PROTOBUF_DEFINE_ARRAY_ACCESSOR + + // A few accessors differ from the macros... + + inline Descriptor::WellKnownType Descriptor::well_known_type() const + { + return static_cast(well_known_type_); + } + + inline bool Descriptor::IsExtensionNumber(int number) const + { + return FindExtensionRangeContainingNumber(number) != nullptr; + } + + inline bool Descriptor::IsReservedNumber(int number) const + { + return FindReservedRangeContainingNumber(number) != nullptr; + } + + inline bool Descriptor::IsReservedName(ConstStringParam name) const + { + for (int i = 0; i < reserved_name_count(); i++) + { + if (name == static_cast(reserved_name(i))) + { + return true; + } + } + return false; + } + + // Can't use PROTOBUF_DEFINE_ARRAY_ACCESSOR because reserved_names_ is actually + // an array of pointers rather than the usual array of objects. + inline const std::string& Descriptor::reserved_name(int index) const + { + return *reserved_names_[index]; + } + + inline bool EnumDescriptor::IsReservedNumber(int number) const + { + return FindReservedRangeContainingNumber(number) != nullptr; + } + + inline bool EnumDescriptor::IsReservedName(ConstStringParam name) const + { + for (int i = 0; i < reserved_name_count(); i++) + { + if (name == static_cast(reserved_name(i))) + { + return true; + } + } + return false; + } + + // Can't use PROTOBUF_DEFINE_ARRAY_ACCESSOR because reserved_names_ is actually + // an array of pointers rather than the usual array of objects. 
+ inline const std::string& EnumDescriptor::reserved_name(int index) const + { + return *reserved_names_[index]; + } + + inline const std::string& FieldDescriptor::lowercase_name() const + { + return all_names_[lowercase_name_index_]; + } + + inline const std::string& FieldDescriptor::camelcase_name() const + { + return all_names_[camelcase_name_index_]; + } + + inline const std::string& FieldDescriptor::json_name() const + { + return all_names_[json_name_index_]; + } + + inline const OneofDescriptor* FieldDescriptor::containing_oneof() const + { + return is_oneof_ ? scope_.containing_oneof : nullptr; + } + + inline int FieldDescriptor::index_in_oneof() const + { + GOOGLE_DCHECK(is_oneof_); + return static_cast(this - scope_.containing_oneof->field(0)); + } + + inline const Descriptor* FieldDescriptor::extension_scope() const + { + GOOGLE_CHECK(is_extension_); + return scope_.extension_scope; + } + + inline FieldDescriptor::Label FieldDescriptor::label() const + { + return static_cast(a))...); + } + + // Like FullMatch(), except that "re" is allowed to match a substring + // of "text". + // + // Returns true iff all of the following conditions are satisfied: + // a. "text" matches "re" partially - for some substring of "text". + // b. The number of matched sub-patterns is >= number of supplied pointers. + // c. The "i"th argument has a suitable type for holding the + // string captured as the "i"th sub-pattern. If you pass in + // NULL for the "i"th argument, or pass fewer arguments than + // number of sub-patterns, the "i"th captured sub-pattern is + // ignored. + template + static bool PartialMatch(absl::string_view text, const RE2& re, A&&... a) + { + return Apply(PartialMatchN, text, re, Arg(std::forward(a))...); + } + + // Like FullMatch() and PartialMatch(), except that "re" has to match + // a prefix of the text, and "input" is advanced past the matched + // text. 
Note: "input" is modified iff this routine returns true + // and "re" matched a non-empty substring of "input". + // + // Returns true iff all of the following conditions are satisfied: + // a. "input" matches "re" partially - for some prefix of "input". + // b. The number of matched sub-patterns is >= number of supplied pointers. + // c. The "i"th argument has a suitable type for holding the + // string captured as the "i"th sub-pattern. If you pass in + // NULL for the "i"th argument, or pass fewer arguments than + // number of sub-patterns, the "i"th captured sub-pattern is + // ignored. + template + static bool Consume(absl::string_view* input, const RE2& re, A&&... a) + { + return Apply(ConsumeN, input, re, Arg(std::forward(a))...); + } + + // Like Consume(), but does not anchor the match at the beginning of + // the text. That is, "re" need not start its match at the beginning + // of "input". For example, "FindAndConsume(s, "(\\w+)", &word)" finds + // the next word in "s" and stores it in "word". + // + // Returns true iff all of the following conditions are satisfied: + // a. "input" matches "re" partially - for some substring of "input". + // b. The number of matched sub-patterns is >= number of supplied pointers. + // c. The "i"th argument has a suitable type for holding the + // string captured as the "i"th sub-pattern. If you pass in + // NULL for the "i"th argument, or pass fewer arguments than + // number of sub-patterns, the "i"th captured sub-pattern is + // ignored. + template + static bool FindAndConsume(absl::string_view* input, const RE2& re, A&&... a) + { + return Apply(FindAndConsumeN, input, re, Arg(std::forward(a))...); + } + + // Replace the first match of "re" in "str" with "rewrite". + // Within "rewrite", backslash-escaped digits (\1 to \9) can be + // used to insert text matching corresponding parenthesized group + // from the pattern. \0 in "rewrite" refers to the entire matching + // text. 
E.g., + // + // std::string s = "yabba dabba doo"; + // CHECK(RE2::Replace(&s, "b+", "d")); + // + // will leave "s" containing "yada dabba doo" + // + // Returns true if the pattern matches and a replacement occurs, + // false otherwise. + static bool Replace(std::string* str, const RE2& re, absl::string_view rewrite); + + // Like Replace(), except replaces successive non-overlapping occurrences + // of the pattern in the string with the rewrite. E.g. + // + // std::string s = "yabba dabba doo"; + // CHECK(RE2::GlobalReplace(&s, "b+", "d")); + // + // will leave "s" containing "yada dada doo" + // Replacements are not subject to re-matching. + // + // Because GlobalReplace only replaces non-overlapping matches, + // replacing "ana" within "banana" makes only one replacement, not two. + // + // Returns the number of replacements made. + static int GlobalReplace(std::string* str, const RE2& re, absl::string_view rewrite); + + // Like Replace, except that if the pattern matches, "rewrite" + // is copied into "out" with substitutions. The non-matching + // portions of "text" are ignored. + // + // Returns true iff a match occurred and the extraction happened + // successfully; if no match occurs, the string is left unaffected. + // + // REQUIRES: "text" must not alias any part of "*out". + static bool Extract(absl::string_view text, const RE2& re, absl::string_view rewrite, std::string* out); + + // Escapes all potentially meaningful regexp characters in + // 'unquoted'. The returned string, used as a regular expression, + // will match exactly the original string. For example, + // 1.5-2.0? + // may become: + // 1\.5\-2\.0\? + static std::string QuoteMeta(absl::string_view unquoted); + + // Computes range for any strings matching regexp. The min and max can in + // some cases be arbitrarily precise, so the caller gets to specify the + // maximum desired length of string returned. 
+ // + // Assuming PossibleMatchRange(&min, &max, N) returns successfully, any + // string s that is an anchored match for this regexp satisfies + // min <= s && s <= max. + // + // Note that PossibleMatchRange() will only consider the first copy of an + // infinitely repeated element (i.e., any regexp element followed by a '*' or + // '+' operator). Regexps with "{N}" constructions are not affected, as those + // do not compile down to infinite repetitions. + // + // Returns true on success, false on error. + bool PossibleMatchRange(std::string* min, std::string* max, int maxlen) const; + + // Generic matching interface + + // Type of match. + enum Anchor + { + UNANCHORED, // No anchoring + ANCHOR_START, // Anchor at start only + ANCHOR_BOTH // Anchor at start and end + }; + + // Return the number of capturing subpatterns, or -1 if the + // regexp wasn't valid on construction. The overall match ($0) + // does not count: if the regexp is "(a)(b)", returns 2. + int NumberOfCapturingGroups() const + { + return num_captures_; + } + + // Return a map from names to capturing indices. + // The map records the index of the leftmost group + // with the given name. + // Only valid until the re is deleted. + const std::map& NamedCapturingGroups() const; + + // Return a map from capturing indices to names. + // The map has no entries for unnamed groups. + // Only valid until the re is deleted. + const std::map& CapturingGroupNames() const; + + // General matching routine. + // Match against text starting at offset startpos + // and stopping the search at offset endpos. + // Returns true if match found, false if not. + // On a successful match, fills in submatch[] (up to nsubmatch entries) + // with information about submatches. + // I.e. matching RE2("(foo)|(bar)baz") on "barbazbla" will return true, with + // submatch[0] = "barbaz", submatch[1].data() = NULL, submatch[2] = "bar", + // submatch[3].data() = NULL, ..., up to submatch[nsubmatch-1].data() = NULL. 
+ // Caveat: submatch[] may be clobbered even on match failure. + // + // Don't ask for more match information than you will use: + // runs much faster with nsubmatch == 1 than nsubmatch > 1, and + // runs even faster if nsubmatch == 0. + // Doesn't make sense to use nsubmatch > 1 + NumberOfCapturingGroups(), + // but will be handled correctly. + // + // Passing text == absl::string_view() will be handled like any other + // empty string, but note that on return, it will not be possible to tell + // whether submatch i matched the empty string or did not match: + // either way, submatch[i].data() == NULL. + bool Match(absl::string_view text, size_t startpos, size_t endpos, Anchor re_anchor, absl::string_view* submatch, int nsubmatch) const; + + // Check that the given rewrite string is suitable for use with this + // regular expression. It checks that: + // * The regular expression has enough parenthesized subexpressions + // to satisfy all of the \N tokens in rewrite + // * The rewrite string doesn't have any syntax errors. E.g., + // '\' followed by anything other than a digit or '\'. + // A true return value guarantees that Replace() and Extract() won't + // fail because of a bad rewrite string. + bool CheckRewriteString(absl::string_view rewrite, std::string* error) const; + + // Returns the maximum submatch needed for the rewrite to be done by + // Replace(). E.g. if rewrite == "foo \\2,\\1", returns 2. + static int MaxSubmatch(absl::string_view rewrite); + + // Append the "rewrite" string, with backslash substitutions from "vec", + // to string "out". + // Returns true on success. This method can fail because of a malformed + // rewrite string. CheckRewriteString guarantees that the rewrite will + // be sucessful. 
+ bool Rewrite(std::string* out, absl::string_view rewrite, const absl::string_view* vec, int veclen) const; + + // Constructor options + class Options + { + public: + // The options are (defaults in parentheses): + // + // utf8 (true) text and pattern are UTF-8; otherwise Latin-1 + // posix_syntax (false) restrict regexps to POSIX egrep syntax + // longest_match (false) search for longest match, not first match + // log_errors (true) log syntax and execution errors to ERROR + // max_mem (see below) approx. max memory footprint of RE2 + // literal (false) interpret string as literal, not regexp + // never_nl (false) never match \n, even if it is in regexp + // dot_nl (false) dot matches everything including new line + // never_capture (false) parse all parens as non-capturing + // case_sensitive (true) match is case-sensitive (regexp can override + // with (?i) unless in posix_syntax mode) + // + // The following options are only consulted when posix_syntax == true. + // When posix_syntax == false, these features are always enabled and + // cannot be turned off; to perform multi-line matching in that case, + // begin the regexp with (?m). + // perl_classes (false) allow Perl's \d \s \w \D \S \W + // word_boundary (false) allow Perl's \b \B (word boundary and not) + // one_line (false) ^ and $ only match beginning and end of text + // + // The max_mem option controls how much memory can be used + // to hold the compiled form of the regexp (the Prog) and + // its cached DFA graphs. Code Search placed limits on the number + // of Prog instructions and DFA states: 10,000 for both. + // In RE2, those limits would translate to about 240 KB per Prog + // and perhaps 2.5 MB per DFA (DFA state sizes vary by regexp; RE2 does a + // better job of keeping them small than Code Search did). + // Each RE2 has two Progs (one forward, one reverse), and each Prog + // can have two DFAs (one first match, one longest match). 
+ // That makes 4 DFAs: + // + // forward, first-match - used for UNANCHORED or ANCHOR_START searches + // if opt.longest_match() == false + // forward, longest-match - used for all ANCHOR_BOTH searches, + // and the other two kinds if + // opt.longest_match() == true + // reverse, first-match - never used + // reverse, longest-match - used as second phase for unanchored searches + // + // The RE2 memory budget is statically divided between the two + // Progs and then the DFAs: two thirds to the forward Prog + // and one third to the reverse Prog. The forward Prog gives half + // of what it has left over to each of its DFAs. The reverse Prog + // gives it all to its longest-match DFA. + // + // Once a DFA fills its budget, it flushes its cache and starts over. + // If this happens too often, RE2 falls back on the NFA implementation. + + // For now, make the default budget something close to Code Search. + static const int kDefaultMaxMem = 8 << 20; + + enum Encoding + { + EncodingUTF8 = 1, + EncodingLatin1 + }; + + Options() : + max_mem_(kDefaultMaxMem), + encoding_(EncodingUTF8), + posix_syntax_(false), + longest_match_(false), + log_errors_(true), + literal_(false), + never_nl_(false), + dot_nl_(false), + never_capture_(false), + case_sensitive_(true), + perl_classes_(false), + word_boundary_(false), + one_line_(false) + { + } + + /*implicit*/ Options(CannedOptions); + + int64_t max_mem() const + { + return max_mem_; + } + void set_max_mem(int64_t m) + { + max_mem_ = m; + } + + Encoding encoding() const + { + return encoding_; + } + void set_encoding(Encoding encoding) + { + encoding_ = encoding; + } + + bool posix_syntax() const + { + return posix_syntax_; + } + void set_posix_syntax(bool b) + { + posix_syntax_ = b; + } + + bool longest_match() const + { + return longest_match_; + } + void set_longest_match(bool b) + { + longest_match_ = b; + } + + bool log_errors() const + { + return log_errors_; + } + void set_log_errors(bool b) + { + log_errors_ = b; + } + + 
bool literal() const + { + return literal_; + } + void set_literal(bool b) + { + literal_ = b; + } + + bool never_nl() const + { + return never_nl_; + } + void set_never_nl(bool b) + { + never_nl_ = b; + } + + bool dot_nl() const + { + return dot_nl_; + } + void set_dot_nl(bool b) + { + dot_nl_ = b; + } + + bool never_capture() const + { + return never_capture_; + } + void set_never_capture(bool b) + { + never_capture_ = b; + } + + bool case_sensitive() const + { + return case_sensitive_; + } + void set_case_sensitive(bool b) + { + case_sensitive_ = b; + } + + bool perl_classes() const + { + return perl_classes_; + } + void set_perl_classes(bool b) + { + perl_classes_ = b; + } + + bool word_boundary() const + { + return word_boundary_; + } + void set_word_boundary(bool b) + { + word_boundary_ = b; + } + + bool one_line() const + { + return one_line_; + } + void set_one_line(bool b) + { + one_line_ = b; + } + + void Copy(const Options& src) + { + *this = src; + } + + int ParseFlags() const; + + private: + int64_t max_mem_; + Encoding encoding_; + bool posix_syntax_; + bool longest_match_; + bool log_errors_; + bool literal_; + bool never_nl_; + bool dot_nl_; + bool never_capture_; + bool case_sensitive_; + bool perl_classes_; + bool word_boundary_; + bool one_line_; + }; + + // Returns the options set in the constructor. + const Options& options() const + { + return options_; + } + + // Argument converters; see below. + template + static Arg CRadix(T* ptr); + template + static Arg Hex(T* ptr); + template + static Arg Octal(T* ptr); + + // Controls the maximum count permitted by GlobalReplace(); -1 is unlimited. + // FOR FUZZING ONLY. 
+ static void FUZZING_ONLY_set_maximum_global_replace_count(int i); + + private: + void Init(absl::string_view pattern, const Options& options); + + bool DoMatch(absl::string_view text, Anchor re_anchor, size_t* consumed, const Arg* const args[], int n) const; + + re2::Prog* ReverseProg() const; + + // First cache line is relatively cold fields. + const std::string* pattern_; // string regular expression + Options options_; // option flags + re2::Regexp* entire_regexp_; // parsed regular expression + re2::Regexp* suffix_regexp_; // parsed regular expression, prefix_ removed + const std::string* error_; // error indicator (or points to empty string) + const std::string* error_arg_; // fragment of regexp showing error (or ditto) + + // Second cache line is relatively hot fields. + // These are ordered oddly to pack everything. + int num_captures_; // number of capturing groups + ErrorCode error_code_ : 29; // error code (29 bits is more than enough) + bool longest_match_ : 1; // cached copy of options_.longest_match() + bool is_one_pass_ : 1; // can use prog_->SearchOnePass? + bool prefix_foldcase_ : 1; // prefix_ is ASCII case-insensitive + std::string prefix_; // required prefix (before suffix_regexp_) + re2::Prog* prog_; // compiled program for regexp + + // Reverse Prog for DFA execution only + mutable re2::Prog* rprog_; + // Map from capture names to indices + mutable const std::map* named_groups_; + // Map from capture indices to names + mutable const std::map* group_names_; + + mutable absl::once_flag rprog_once_; + mutable absl::once_flag named_groups_once_; + mutable absl::once_flag group_names_once_; + }; + + /***** Implementation details *****/ + + namespace re2_internal + { + + // Types for which the 3-ary Parse() function template has specializations. 
+ template + struct Parse3ary : public std::false_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + template<> + struct Parse3ary : public std::true_type + { + }; + + template + bool Parse(const char* str, size_t n, T* dest); + + // Types for which the 4-ary Parse() function template has specializations. + template + struct Parse4ary : public std::false_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + template<> + struct Parse4ary : public std::true_type + { + }; + + template + bool Parse(const char* str, size_t n, T* dest, int radix); + + // Support absl::optional for all T with a stock parser. 
+ template + struct Parse3ary> : public Parse3ary + { + }; + template + struct Parse4ary> : public Parse4ary + { + }; + + template + bool Parse(const char* str, size_t n, absl::optional* dest) + { + if (str == NULL) + { + if (dest != NULL) + dest->reset(); + return true; + } + T tmp; + if (Parse(str, n, &tmp)) + { + if (dest != NULL) + dest->emplace(std::move(tmp)); + return true; + } + return false; + } + + template + bool Parse(const char* str, size_t n, absl::optional* dest, int radix) + { + if (str == NULL) + { + if (dest != NULL) + dest->reset(); + return true; + } + T tmp; + if (Parse(str, n, &tmp, radix)) + { + if (dest != NULL) + dest->emplace(std::move(tmp)); + return true; + } + return false; + } + + } // namespace re2_internal + + class RE2::Arg + { + private: + template + using CanParse3ary = typename std::enable_if< + re2_internal::Parse3ary::value, + int>::type; + + template + using CanParse4ary = typename std::enable_if< + re2_internal::Parse4ary::value, + int>::type; + +#if !defined(_MSC_VER) + template + using CanParseFrom = typename std::enable_if< + std::is_member_function_pointer< + decltype(static_cast( + &T::ParseFrom + ))>::value, + int>::type; +#endif + + public: + Arg() : + Arg(nullptr) + { + } + Arg(std::nullptr_t ptr) : + arg_(ptr), + parser_(DoNothing) + { + } + + template = 0> + Arg(T* ptr) : + arg_(ptr), + parser_(DoParse3ary) + { + } + + template = 0> + Arg(T* ptr) : + arg_(ptr), + parser_(DoParse4ary) + { + } + +#if !defined(_MSC_VER) + template = 0> + Arg(T* ptr) : + arg_(ptr), + parser_(DoParseFrom) + { + } +#endif + + typedef bool (*Parser)(const char* str, size_t n, void* dest); + + template + Arg(T* ptr, Parser parser) : + arg_(ptr), + parser_(parser) + { + } + + bool Parse(const char* str, size_t n) const + { + return (*parser_)(str, n, arg_); + } + + private: + static bool DoNothing(const char* /*str*/, size_t /*n*/, void* /*dest*/) + { + return true; + } + + template + static bool DoParse3ary(const char* str, size_t n, void* 
dest) + { + return re2_internal::Parse(str, n, reinterpret_cast(dest)); + } + + template + static bool DoParse4ary(const char* str, size_t n, void* dest) + { + return re2_internal::Parse(str, n, reinterpret_cast(dest), 10); + } + +#if !defined(_MSC_VER) + template + static bool DoParseFrom(const char* str, size_t n, void* dest) + { + if (dest == NULL) + return true; + return reinterpret_cast(dest)->ParseFrom(str, n); + } +#endif + + void* arg_; + Parser parser_; + }; + + template + inline RE2::Arg RE2::CRadix(T* ptr) + { + return RE2::Arg(ptr, [](const char* str, size_t n, void* dest) -> bool + { return re2_internal::Parse(str, n, reinterpret_cast(dest), 0); }); + } + + template + inline RE2::Arg RE2::Hex(T* ptr) + { + return RE2::Arg(ptr, [](const char* str, size_t n, void* dest) -> bool + { return re2_internal::Parse(str, n, reinterpret_cast(dest), 16); }); + } + + template + inline RE2::Arg RE2::Octal(T* ptr) + { + return RE2::Arg(ptr, [](const char* str, size_t n, void* dest) -> bool + { return re2_internal::Parse(str, n, reinterpret_cast(dest), 8); }); + } + +// Silence warnings about missing initializers for members of LazyRE2. +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 6 +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif + + // Helper for writing global or static RE2s safely. + // Write + // static LazyRE2 re = {".*"}; + // and then use *re instead of writing + // static RE2 re(".*"); + // The former is more careful about multithreaded + // situations than the latter. + // + // N.B. This class never deletes the RE2 object that + // it constructs: that's a feature, so that it can be used + // for global and function static variables. + class LazyRE2 + { + private: + struct NoArg + { + }; + + public: + typedef RE2 element_type; // support std::pointer_traits + + // Constructor omitted to preserve braced initialization in C++98. 
+ + // Pretend to be a pointer to Type (never NULL due to on-demand creation): + RE2& operator*() const + { + return *get(); + } + RE2* operator->() const + { + return get(); + } + + // Named accessor/initializer: + RE2* get() const + { + absl::call_once(once_, &LazyRE2::Init, this); + return ptr_; + } + + // All data fields must be public to support {"foo"} initialization. + const char* pattern_; + RE2::CannedOptions options_; + NoArg barrier_against_excess_initializers_; + + mutable RE2* ptr_; + mutable absl::once_flag once_; + + private: + static void Init(const LazyRE2* lazy_re2) + { + lazy_re2->ptr_ = new RE2(lazy_re2->pattern_, lazy_re2->options_); + } + + void operator=(const LazyRE2&); // disallowed + }; + + namespace hooks + { + +// Most platforms support thread_local. Older versions of iOS don't support +// thread_local, but for the sake of brevity, we lump together all versions +// of Apple platforms that aren't macOS. If an iOS application really needs +// the context pointee someday, we can get more specific then... +// +// As per https://github.com/google/re2/issues/325, thread_local support in +// MinGW seems to be buggy. (FWIW, Abseil folks also avoid it.) +#define RE2_HAVE_THREAD_LOCAL +#if (defined(__APPLE__) && !(defined(TARGET_OS_OSX) && TARGET_OS_OSX)) || defined(__MINGW32__) +#undef RE2_HAVE_THREAD_LOCAL +#endif + +// A hook must not make any assumptions regarding the lifetime of the context +// pointee beyond the current invocation of the hook. Pointers and references +// obtained via the context pointee should be considered invalidated when the +// hook returns. Hence, any data about the context pointee (e.g. its pattern) +// would have to be copied in order for it to be kept for an indefinite time. +// +// A hook must not use RE2 for matching. Control flow reentering RE2::Match() +// could result in infinite mutual recursion. To discourage that possibility, +// RE2 will not maintain the context pointer correctly when used in that way. 
+#ifdef RE2_HAVE_THREAD_LOCAL + extern thread_local const RE2* context; +#endif + + struct DFAStateCacheReset + { + int64_t state_budget; + size_t state_cache_size; + }; + + struct DFASearchFailure + { + // Nothing yet... + }; + +#define DECLARE_HOOK(type) \ + using type##Callback = void(const type&); \ + void Set##type##Hook(type##Callback* cb); \ + type##Callback* Get##type##Hook(); + + DECLARE_HOOK(DFAStateCacheReset) + DECLARE_HOOK(DFASearchFailure) + +#undef DECLARE_HOOK + + } // namespace hooks + +} // namespace re2 + +using re2::LazyRE2; +using re2::RE2; + +#endif // RE2_RE2_H_ diff --git a/CAPI/cpp/grpc/include/re2/set.h b/CAPI/cpp/grpc/include/re2/set.h new file mode 100644 index 00000000..8798f9f1 --- /dev/null +++ b/CAPI/cpp/grpc/include/re2/set.h @@ -0,0 +1,90 @@ +// Copyright 2010 The RE2 Authors. All Rights Reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#ifndef RE2_SET_H_ +#define RE2_SET_H_ + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "re2/re2.h" + +namespace re2 +{ + class Prog; + class Regexp; +} // namespace re2 + +namespace re2 +{ + + // An RE2::Set represents a collection of regexps that can + // be searched for simultaneously. + class RE2::Set + { + public: + enum ErrorKind + { + kNoError = 0, + kNotCompiled, // The set is not compiled. + kOutOfMemory, // The DFA ran out of memory. + kInconsistent, // The result is inconsistent. This should never happen. + }; + + struct ErrorInfo + { + ErrorKind kind; + }; + + Set(const RE2::Options& options, RE2::Anchor anchor); + ~Set(); + + // Not copyable. + Set(const Set&) = delete; + Set& operator=(const Set&) = delete; + // Movable. + Set(Set&& other); + Set& operator=(Set&& other); + + // Adds pattern to the set using the options passed to the constructor. + // Returns the index that will identify the regexp in the output of Match(), + // or -1 if the regexp cannot be parsed. 
+ // Indices are assigned in sequential order starting from 0. + // Errors do not increment the index; if error is not NULL, *error will hold + // the error message from the parser. + int Add(absl::string_view pattern, std::string* error); + + // Compiles the set in preparation for matching. + // Returns false if the compiler runs out of memory. + // Add() must not be called again after Compile(). + // Compile() must be called before Match(). + bool Compile(); + + // Returns true if text matches at least one of the regexps in the set. + // Fills v (if not NULL) with the indices of the matching regexps. + // Callers must not expect v to be sorted. + bool Match(absl::string_view text, std::vector* v) const; + + // As above, but populates error_info (if not NULL) when none of the regexps + // in the set matched. This can inform callers when DFA execution fails, for + // example, because they might wish to handle that case differently. + bool Match(absl::string_view text, std::vector* v, ErrorInfo* error_info) const; + + private: + typedef std::pair Elem; + + RE2::Options options_; + RE2::Anchor anchor_; + std::vector elem_; + bool compiled_; + int size_; + std::unique_ptr prog_; + }; + +} // namespace re2 + +#endif // RE2_SET_H_ diff --git a/CAPI/cpp/grpc/include/re2/stringpiece.h b/CAPI/cpp/grpc/include/re2/stringpiece.h new file mode 100644 index 00000000..4097b323 --- /dev/null +++ b/CAPI/cpp/grpc/include/re2/stringpiece.h @@ -0,0 +1,19 @@ +// Copyright 2022 The RE2 Authors. All Rights Reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#ifndef RE2_STRINGPIECE_H_ +#define RE2_STRINGPIECE_H_ + +#include "absl/strings/string_view.h" + +namespace re2 +{ + + // Until RE2 requires C++17 and uses std::string_view, allow users to + // continue to #include "re2/stringpiece.h" and use re2::StringPiece. 
+ using StringPiece = absl::string_view; + +} // namespace re2 + +#endif // RE2_STRINGPIECE_H_ diff --git a/CAPI/cpp/grpc/include/upb/arena.h b/CAPI/cpp/grpc/include/upb/arena.h new file mode 100644 index 00000000..976604fa --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/arena.h @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef UPB_ARENA_H_ +#define UPB_ARENA_H_ + +#include +#include +#include +#include + +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /** upb_alloc *****************************************************************/ + + /* A upb_alloc is a possibly-stateful allocator object. + * + * It could either be an arena allocator (which doesn't require individual + * free() calls) or a regular malloc() (which does). The client must therefore + * free memory unless it knows that the allocator is an arena allocator. */ + + struct upb_alloc; + typedef struct upb_alloc upb_alloc; + + /* A malloc()/free() function. + * If "size" is 0 then the function acts like free(), otherwise it acts like + * realloc(). Only "oldsize" bytes from a previous allocation are preserved. */ + typedef void* upb_alloc_func(upb_alloc* alloc, void* ptr, size_t oldsize, size_t size); + + struct upb_alloc + { + upb_alloc_func* func; + }; + + UPB_INLINE void* upb_malloc(upb_alloc* alloc, size_t size) + { + UPB_ASSERT(alloc); + return alloc->func(alloc, NULL, 0, size); + } + + UPB_INLINE void* upb_realloc(upb_alloc* alloc, void* ptr, size_t oldsize, size_t size) + { + UPB_ASSERT(alloc); + return alloc->func(alloc, ptr, oldsize, size); + } + + UPB_INLINE void upb_free(upb_alloc* alloc, void* ptr) + { + assert(alloc); + alloc->func(alloc, ptr, 0, 0); + } + + /* The global allocator used by upb. Uses the standard malloc()/free(). */ + + extern upb_alloc upb_alloc_global; + + /* Functions that hard-code the global malloc. + * + * We still get benefit because we can put custom logic into our global + * allocator, like injecting out-of-memory faults in debug/testing builds. 
*/ + + UPB_INLINE void* upb_gmalloc(size_t size) + { + return upb_malloc(&upb_alloc_global, size); + } + + UPB_INLINE void* upb_grealloc(void* ptr, size_t oldsize, size_t size) + { + return upb_realloc(&upb_alloc_global, ptr, oldsize, size); + } + + UPB_INLINE void upb_gfree(void* ptr) + { + upb_free(&upb_alloc_global, ptr); + } + + /* upb_Arena ******************************************************************/ + + /* upb_Arena is a specific allocator implementation that uses arena allocation. + * The user provides an allocator that will be used to allocate the underlying + * arena blocks. Arenas by nature do not require the individual allocations + * to be freed. However the Arena does allow users to register cleanup + * functions that will run when the arena is destroyed. + * + * A upb_Arena is *not* thread-safe. + * + * You could write a thread-safe arena allocator that satisfies the + * upb_alloc interface, but it would not be as efficient for the + * single-threaded case. */ + + typedef void upb_CleanupFunc(void* ud); + + struct upb_Arena; + typedef struct upb_Arena upb_Arena; + + typedef struct + { + /* We implement the allocator interface. + * This must be the first member of upb_Arena! + * TODO(haberman): remove once handlers are gone. */ + upb_alloc alloc; + + char *ptr, *end; + } _upb_ArenaHead; + + /* Creates an arena from the given initial block (if any -- n may be 0). + * Additional blocks will be allocated from |alloc|. If |alloc| is NULL, this + * is a fixed-size arena and cannot grow. 
*/ + upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc); + void upb_Arena_Free(upb_Arena* a); + bool upb_Arena_AddCleanup(upb_Arena* a, void* ud, upb_CleanupFunc* func); + bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b); + void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size); + + UPB_INLINE upb_alloc* upb_Arena_Alloc(upb_Arena* a) + { + return (upb_alloc*)a; + } + + UPB_INLINE size_t _upb_ArenaHas(upb_Arena* a) + { + _upb_ArenaHead* h = (_upb_ArenaHead*)a; + return (size_t)(h->end - h->ptr); + } + + UPB_INLINE void* _upb_Arena_FastMalloc(upb_Arena* a, size_t size) + { + _upb_ArenaHead* h = (_upb_ArenaHead*)a; + void* ret = h->ptr; + UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret); + UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size); + UPB_UNPOISON_MEMORY_REGION(ret, size); + + h->ptr += size; + +#if UPB_ASAN + { + size_t guard_size = 32; + if (_upb_ArenaHas(a) >= guard_size) + { + h->ptr += guard_size; + } + else + { + h->ptr = h->end; + } + } +#endif + + return ret; + } + + UPB_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) + { + size = UPB_ALIGN_MALLOC(size); + + if (UPB_UNLIKELY(_upb_ArenaHas(a) < size)) + { + return _upb_Arena_SlowMalloc(a, size); + } + + return _upb_Arena_FastMalloc(a, size); + } + + // Shrinks the last alloc from arena. + // REQUIRES: (ptr, oldsize) was the last malloc/realloc from this arena. + // We could also add a upb_Arena_TryShrinkLast() which is simply a no-op if + // this was not the last alloc. + UPB_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr, size_t oldsize, size_t size) + { + _upb_ArenaHead* h = (_upb_ArenaHead*)a; + oldsize = UPB_ALIGN_MALLOC(oldsize); + size = UPB_ALIGN_MALLOC(size); + UPB_ASSERT((char*)ptr + oldsize == h->ptr); // Must be the last alloc. 
+ UPB_ASSERT(size <= oldsize); + h->ptr = (char*)ptr + size; + } + + UPB_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize, size_t size) + { + _upb_ArenaHead* h = (_upb_ArenaHead*)a; + oldsize = UPB_ALIGN_MALLOC(oldsize); + size = UPB_ALIGN_MALLOC(size); + bool is_most_recent_alloc = (uintptr_t)ptr + oldsize == (uintptr_t)h->ptr; + + if (is_most_recent_alloc) + { + ptrdiff_t diff = size - oldsize; + if ((ptrdiff_t)_upb_ArenaHas(a) >= diff) + { + h->ptr += diff; + return ptr; + } + } + else if (size <= oldsize) + { + return ptr; + } + + void* ret = upb_Arena_Malloc(a, size); + + if (ret && oldsize > 0) + { + memcpy(ret, ptr, UPB_MIN(oldsize, size)); + } + + return ret; + } + + UPB_INLINE upb_Arena* upb_Arena_New(void) + { + return upb_Arena_Init(NULL, 0, &upb_alloc_global); + } + +#include "upb/port_undef.inc" + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_ARENA_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/array.h b/CAPI/cpp/grpc/include/upb/array.h new file mode 100644 index 00000000..a122f1d3 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/array.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_ARRAY_H_ +#define UPB_ARRAY_H_ + +#include "google/protobuf/descriptor.upb.h" +#include "upb/message_value.h" + +// Must be last. +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /* Creates a new array on the given arena that holds elements of this type. */ + upb_Array* upb_Array_New(upb_Arena* a, upb_CType type); + + /* Returns the size of the array. */ + size_t upb_Array_Size(const upb_Array* arr); + + /* Returns the given element, which must be within the array's current size. */ + upb_MessageValue upb_Array_Get(const upb_Array* arr, size_t i); + + /* Sets the given element, which must be within the array's current size. */ + void upb_Array_Set(upb_Array* arr, size_t i, upb_MessageValue val); + + /* Appends an element to the array. Returns false on allocation failure. */ + bool upb_Array_Append(upb_Array* array, upb_MessageValue val, upb_Arena* arena); + + /* Moves elements within the array using memmove(). Like memmove(), the source + * and destination elements may be overlapping. */ + void upb_Array_Move(upb_Array* array, size_t dst_idx, size_t src_idx, size_t count); + + /* Inserts one or more empty elements into the array. 
Existing elements are + * shifted right. The new elements have undefined state and must be set with + * `upb_Array_Set()`. + * REQUIRES: `i <= upb_Array_Size(arr)` */ + bool upb_Array_Insert(upb_Array* array, size_t i, size_t count, upb_Arena* arena); + + /* Deletes one or more elements from the array. Existing elements are shifted + * left. + * REQUIRES: `i + count <= upb_Array_Size(arr)` */ + void upb_Array_Delete(upb_Array* array, size_t i, size_t count); + + /* Changes the size of a vector. New elements are initialized to empty/0. + * Returns false on allocation failure. */ + bool upb_Array_Resize(upb_Array* array, size_t size, upb_Arena* arena); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_ARRAY_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/bindings/lua/upb.h b/CAPI/cpp/grpc/include/upb/bindings/lua/upb.h new file mode 100644 index 00000000..0aa6a902 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/bindings/lua/upb.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Shared definitions for upb Lua modules. + */ + +#ifndef UPB_LUA_UPB_H_ +#define UPB_LUA_UPB_H_ + +#include "lauxlib.h" +#include "upb/def.h" +#include "upb/msg.h" +#include "upb/reflection.h" + +/* Lua changes its API in incompatible ways in every minor release. + * This is some shim code to paper over the differences. */ + +#if LUA_VERSION_NUM == 501 +#define lua_rawlen lua_objlen +#define lua_setuservalue(L, idx) lua_setfenv(L, idx) +#define lua_getuservalue(L, idx) lua_getfenv(L, idx) +#define lupb_setfuncs(L, l) luaL_register(L, NULL, l) +#elif LUA_VERSION_NUM >= 502 && LUA_VERSION_NUM <= 504 +#define lupb_setfuncs(L, l) luaL_setfuncs(L, l, 0) +#else +#error Only Lua 5.1-5.4 are supported +#endif + +/* Create a new userdata with the given type and |n| uservals, which are popped + * from the stack to initialize the userdata. */ +void* lupb_newuserdata(lua_State* L, size_t size, int n, const char* type); + +#if LUA_VERSION_NUM < 504 +/* Polyfills for this Lua 5.4 function. Pushes userval |n| for the userdata at + * |index|. 
*/ +int lua_setiuservalue(lua_State* L, int index, int n); +int lua_getiuservalue(lua_State* L, int index, int n); +#endif + +/* Registers a type with the given name, methods, and metamethods. */ +void lupb_register_type(lua_State* L, const char* name, const luaL_Reg* m, const luaL_Reg* mm); + +/* Checks the given upb_Status and throws a Lua error if it is not ok. */ +void lupb_checkstatus(lua_State* L, upb_Status* s); + +int luaopen_lupb(lua_State* L); + +/* C <-> Lua value conversions. ***********************************************/ + +/* Custom check/push functions. Unlike the Lua equivalents, they are pinned to + * specific C types (instead of lua_Number, etc), and do not allow any implicit + * conversion or data loss. */ +int64_t lupb_checkint64(lua_State* L, int narg); +int32_t lupb_checkint32(lua_State* L, int narg); +uint64_t lupb_checkuint64(lua_State* L, int narg); +uint32_t lupb_checkuint32(lua_State* L, int narg); +double lupb_checkdouble(lua_State* L, int narg); +float lupb_checkfloat(lua_State* L, int narg); +bool lupb_checkbool(lua_State* L, int narg); +const char* lupb_checkstring(lua_State* L, int narg, size_t* len); +const char* lupb_checkname(lua_State* L, int narg); + +void lupb_pushint64(lua_State* L, int64_t val); +void lupb_pushint32(lua_State* L, int32_t val); +void lupb_pushuint64(lua_State* L, uint64_t val); +void lupb_pushuint32(lua_State* L, uint32_t val); + +/** From def.c. ***************************************************************/ + +const upb_MessageDef* lupb_MessageDef_check(lua_State* L, int narg); +const upb_EnumDef* lupb_EnumDef_check(lua_State* L, int narg); +const upb_FieldDef* lupb_FieldDef_check(lua_State* L, int narg); +upb_DefPool* lupb_DefPool_check(lua_State* L, int narg); +void lupb_MessageDef_pushsubmsgdef(lua_State* L, const upb_FieldDef* f); + +void lupb_def_registertypes(lua_State* L); + +/** From msg.c. 
***************************************************************/ + +void lupb_pushmsgval(lua_State* L, int container, upb_CType type, upb_MessageValue val); +int lupb_MessageDef_call(lua_State* L); +upb_Arena* lupb_Arena_pushnew(lua_State* L); + +void lupb_msg_registertypes(lua_State* L); + +#define lupb_assert(L, predicate) \ + if (!(predicate)) \ + luaL_error(L, "internal error: %s, %s:%d ", #predicate, __FILE__, __LINE__); + +#define LUPB_UNUSED(var) (void)var + +#if defined(__GNUC__) || defined(__clang__) +#define LUPB_UNREACHABLE() \ + do \ + { \ + assert(0); \ + __builtin_unreachable(); \ + } while (0) +#else +#define LUPB_UNREACHABLE() \ + do \ + { \ + assert(0); \ + } while (0) +#endif + +#endif /* UPB_LUA_UPB_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/collections.h b/CAPI/cpp/grpc/include/upb/collections.h new file mode 100644 index 00000000..81558dc5 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/collections.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_COLLECTIONS_H_ +#define UPB_COLLECTIONS_H_ + +// TODO(b/232091617): Delete this entire header which currently exists only for +// temporary backwards compatibility. +#include "upb/array.h" +#include "upb/map.h" + +#endif /* UPB_COLLECTIONS_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/decode.h b/CAPI/cpp/grpc/include/upb/decode.h new file mode 100644 index 00000000..2b4ed409 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/decode.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * upb_decode: parsing into a upb_Message using a upb_MiniTable. + */ + +#ifndef UPB_DECODE_H_ +#define UPB_DECODE_H_ + +#include "upb/extension_registry.h" +#include "upb/msg.h" + +/* Must be last. */ +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + enum + { + /* If set, strings will alias the input buffer instead of copying into the + * arena. */ + kUpb_DecodeOption_AliasString = 1, + + /* If set, the parse will return failure if any message is missing any + * required fields when the message data ends. The parse will still continue, + * and the failure will only be reported at the end. + * + * IMPORTANT CAVEATS: + * + * 1. This can throw a false positive failure if an incomplete message is seen + * on the wire but is later completed when the sub-message occurs again. + * For this reason, a second pass is required to verify a failure, to be + * truly robust. + * + * 2. This can return a false success if you are decoding into a message that + * already has some sub-message fields present. If the sub-message does + * not occur in the binary payload, we will never visit it and discover the + * incomplete sub-message. 
For this reason, this check is only useful for + * implemting ParseFromString() semantics. For MergeFromString(), a + * post-parse validation step will always be necessary. */ + kUpb_DecodeOption_CheckRequired = 2, + }; + +#define UPB_DECODE_MAXDEPTH(depth) ((depth) << 16) + + typedef enum + { + kUpb_DecodeStatus_Ok = 0, + kUpb_DecodeStatus_Malformed = 1, // Wire format was corrupt + kUpb_DecodeStatus_OutOfMemory = 2, // Arena alloc failed + kUpb_DecodeStatus_BadUtf8 = 3, // String field had bad UTF-8 + kUpb_DecodeStatus_MaxDepthExceeded = 4, // Exceeded UPB_DECODE_MAXDEPTH + + // kUpb_DecodeOption_CheckRequired failed (see above), but the parse otherwise + // succeeded. + kUpb_DecodeStatus_MissingRequired = 5, + } upb_DecodeStatus; + + upb_DecodeStatus upb_Decode(const char* buf, size_t size, upb_Message* msg, const upb_MiniTable* l, const upb_ExtensionRegistry* extreg, int options, upb_Arena* arena); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_DECODE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/decode_fast.h b/CAPI/cpp/grpc/include/upb/decode_fast.h new file mode 100644 index 00000000..d656c6b3 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/decode_fast.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +// These are the specialized field parser functions for the fast parser. +// Generated tables will refer to these by name. +// +// The function names are encoded with names like: +// +// // 123 4 +// upb_pss_1bt(); // Parse singular string, 1 byte tag. 
+// +// In position 1: +// - 'p' for parse, most function use this +// - 'c' for copy, for when we are copying strings instead of aliasing +// +// In position 2 (cardinality): +// - 's' for singular, with or without hasbit +// - 'o' for oneof +// - 'r' for non-packed repeated +// - 'p' for packed repeated +// +// In position 3 (type): +// - 'b1' for bool +// - 'v4' for 4-byte varint +// - 'v8' for 8-byte varint +// - 'z4' for zig-zag-encoded 4-byte varint +// - 'z8' for zig-zag-encoded 8-byte varint +// - 'f4' for 4-byte fixed +// - 'f8' for 8-byte fixed +// - 'm' for sub-message +// - 's' for string (validate UTF-8) +// - 'b' for bytes +// +// In position 4 (tag length): +// - '1' for one-byte tags (field numbers 1-15) +// - '2' for two-byte tags (field numbers 16-2048) + +#ifndef UPB_DECODE_FAST_H_ +#define UPB_DECODE_FAST_H_ + +#include "upb/msg.h" + +struct upb_Decoder; + +// The fallback, generic parsing function that can handle any field type. +// This just uses the regular (non-fast) parser to parse a single field. 
+const char* fastdecode_generic(struct upb_Decoder* d, const char* ptr, upb_Message* msg, intptr_t table, uint64_t hasbits, uint64_t data); + +#define UPB_PARSE_PARAMS \ + struct upb_Decoder *d, const char *ptr, upb_Message *msg, intptr_t table, \ + uint64_t hasbits, uint64_t data + +/* primitive fields ***********************************************************/ + +#define F(card, type, valbytes, tagbytes) \ + const char* upb_p##card##type##valbytes##_##tagbytes##bt(UPB_PARSE_PARAMS); + +#define TYPES(card, tagbytes) \ + F(card, b, 1, tagbytes) \ + F(card, v, 4, tagbytes) \ + F(card, v, 8, tagbytes) \ + F(card, z, 4, tagbytes) \ + F(card, z, 8, tagbytes) \ + F(card, f, 4, tagbytes) \ + F(card, f, 8, tagbytes) + +#define TAGBYTES(card) \ + TYPES(card, 1) \ + TYPES(card, 2) + +TAGBYTES(s) +TAGBYTES(o) +TAGBYTES(r) +TAGBYTES(p) + +#undef F +#undef TYPES +#undef TAGBYTES + +/* string fields **************************************************************/ + +#define F(card, tagbytes, type) \ + const char* upb_p##card##type##_##tagbytes##bt(UPB_PARSE_PARAMS); \ + const char* upb_c##card##type##_##tagbytes##bt(UPB_PARSE_PARAMS); + +#define UTF8(card, tagbytes) \ + F(card, tagbytes, s) \ + F(card, tagbytes, b) + +#define TAGBYTES(card) \ + UTF8(card, 1) \ + UTF8(card, 2) + +TAGBYTES(s) +TAGBYTES(o) +TAGBYTES(r) + +#undef F +#undef TAGBYTES + +/* sub-message fields *********************************************************/ + +#define F(card, tagbytes, size_ceil, ceil_arg) \ + const char* upb_p##card##m_##tagbytes##bt_max##size_ceil##b(UPB_PARSE_PARAMS); + +#define SIZES(card, tagbytes) \ + F(card, tagbytes, 64, 64) \ + F(card, tagbytes, 128, 128) \ + F(card, tagbytes, 192, 192) \ + F(card, tagbytes, 256, 256) \ + F(card, tagbytes, max, -1) + +#define TAGBYTES(card) \ + SIZES(card, 1) \ + SIZES(card, 2) + +TAGBYTES(s) +TAGBYTES(o) +TAGBYTES(r) + +#undef TAGBYTES +#undef SIZES +#undef F + +#undef UPB_PARSE_PARAMS + +#endif /* UPB_DECODE_FAST_H_ */ diff --git 
a/CAPI/cpp/grpc/include/upb/decode_internal.h b/CAPI/cpp/grpc/include/upb/decode_internal.h new file mode 100644 index 00000000..fe3cbc7d --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/decode_internal.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_DECODE_INT_H_ +#define UPB_DECODE_INT_H_ + +// TODO(b/232091617): Delete this entire header which currently exists only for +// temporary backwards compatibility. 
+ +#include "upb/internal/decode.h" + +#endif /* UPB_DECODE_INT_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/def.h b/CAPI/cpp/grpc/include/upb/def.h new file mode 100644 index 00000000..ca9b56ba --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/def.h @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_DEF_H_ +#define UPB_DEF_H_ + +#include "google/protobuf/descriptor.upb.h" +#include "upb/internal/table.h" +#include "upb/upb.h" + +/* Must be last. 
*/ +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + + struct upb_EnumDef; + typedef struct upb_EnumDef upb_EnumDef; + struct upb_EnumValueDef; + typedef struct upb_EnumValueDef upb_EnumValueDef; + struct upb_ExtensionRange; + typedef struct upb_ExtensionRange upb_ExtensionRange; + struct upb_FieldDef; + typedef struct upb_FieldDef upb_FieldDef; + struct upb_FileDef; + typedef struct upb_FileDef upb_FileDef; + struct upb_MethodDef; + typedef struct upb_MethodDef upb_MethodDef; + struct upb_MessageDef; + typedef struct upb_MessageDef upb_MessageDef; + struct upb_OneofDef; + typedef struct upb_OneofDef upb_OneofDef; + struct upb_ServiceDef; + typedef struct upb_ServiceDef upb_ServiceDef; + struct upb_streamdef; + typedef struct upb_streamdef upb_streamdef; + struct upb_DefPool; + typedef struct upb_DefPool upb_DefPool; + + typedef enum + { + kUpb_Syntax_Proto2 = 2, + kUpb_Syntax_Proto3 = 3 + } upb_Syntax; + + /* All the different kind of well known type messages. For simplicity of check, + * number wrappers and string wrappers are grouped together. Make sure the + * order and merber of these groups are not changed. + */ + typedef enum + { + kUpb_WellKnown_Unspecified, + kUpb_WellKnown_Any, + kUpb_WellKnown_FieldMask, + kUpb_WellKnown_Duration, + kUpb_WellKnown_Timestamp, + /* number wrappers */ + kUpb_WellKnown_DoubleValue, + kUpb_WellKnown_FloatValue, + kUpb_WellKnown_Int64Value, + kUpb_WellKnown_UInt64Value, + kUpb_WellKnown_Int32Value, + kUpb_WellKnown_UInt32Value, + /* string wrappers */ + kUpb_WellKnown_StringValue, + kUpb_WellKnown_BytesValue, + kUpb_WellKnown_BoolValue, + kUpb_WellKnown_Value, + kUpb_WellKnown_ListValue, + kUpb_WellKnown_Struct + } upb_WellKnown; + +/* upb_FieldDef ***************************************************************/ + +/* Maximum field number allowed for FieldDefs. This is an inherent limit of the + * protobuf wire format. 
*/ +#define kUpb_MaxFieldNumber ((1 << 29) - 1) + + const google_protobuf_FieldOptions* upb_FieldDef_Options(const upb_FieldDef* f); + bool upb_FieldDef_HasOptions(const upb_FieldDef* f); + const char* upb_FieldDef_FullName(const upb_FieldDef* f); + upb_CType upb_FieldDef_CType(const upb_FieldDef* f); + upb_FieldType upb_FieldDef_Type(const upb_FieldDef* f); + upb_Label upb_FieldDef_Label(const upb_FieldDef* f); + uint32_t upb_FieldDef_Number(const upb_FieldDef* f); + const char* upb_FieldDef_Name(const upb_FieldDef* f); + const char* upb_FieldDef_JsonName(const upb_FieldDef* f); + bool upb_FieldDef_HasJsonName(const upb_FieldDef* f); + bool upb_FieldDef_IsExtension(const upb_FieldDef* f); + bool upb_FieldDef_IsPacked(const upb_FieldDef* f); + const upb_FileDef* upb_FieldDef_File(const upb_FieldDef* f); + const upb_MessageDef* upb_FieldDef_ContainingType(const upb_FieldDef* f); + const upb_MessageDef* upb_FieldDef_ExtensionScope(const upb_FieldDef* f); + const upb_OneofDef* upb_FieldDef_ContainingOneof(const upb_FieldDef* f); + const upb_OneofDef* upb_FieldDef_RealContainingOneof(const upb_FieldDef* f); + uint32_t upb_FieldDef_Index(const upb_FieldDef* f); + bool upb_FieldDef_IsSubMessage(const upb_FieldDef* f); + bool upb_FieldDef_IsString(const upb_FieldDef* f); + bool upb_FieldDef_IsOptional(const upb_FieldDef* f); + bool upb_FieldDef_IsRequired(const upb_FieldDef* f); + bool upb_FieldDef_IsRepeated(const upb_FieldDef* f); + bool upb_FieldDef_IsPrimitive(const upb_FieldDef* f); + bool upb_FieldDef_IsMap(const upb_FieldDef* f); + bool upb_FieldDef_HasDefault(const upb_FieldDef* f); + bool upb_FieldDef_HasSubDef(const upb_FieldDef* f); + bool upb_FieldDef_HasPresence(const upb_FieldDef* f); + const upb_MessageDef* upb_FieldDef_MessageSubDef(const upb_FieldDef* f); + const upb_EnumDef* upb_FieldDef_EnumSubDef(const upb_FieldDef* f); + const upb_MiniTable_Field* upb_FieldDef_MiniTable(const upb_FieldDef* f); + const upb_MiniTable_Extension* 
_upb_FieldDef_ExtensionMiniTable( + const upb_FieldDef* f + ); + bool _upb_FieldDef_IsProto3Optional(const upb_FieldDef* f); + + /* upb_OneofDef ***************************************************************/ + + const google_protobuf_OneofOptions* upb_OneofDef_Options(const upb_OneofDef* o); + bool upb_OneofDef_HasOptions(const upb_OneofDef* o); + const char* upb_OneofDef_Name(const upb_OneofDef* o); + const upb_MessageDef* upb_OneofDef_ContainingType(const upb_OneofDef* o); + uint32_t upb_OneofDef_Index(const upb_OneofDef* o); + bool upb_OneofDef_IsSynthetic(const upb_OneofDef* o); + int upb_OneofDef_FieldCount(const upb_OneofDef* o); + const upb_FieldDef* upb_OneofDef_Field(const upb_OneofDef* o, int i); + + /* Oneof lookups: + * - ntof: look up a field by name. + * - ntofz: look up a field by name (as a null-terminated string). + * - itof: look up a field by number. */ + const upb_FieldDef* upb_OneofDef_LookupNameWithSize(const upb_OneofDef* o, const char* name, size_t length); + UPB_INLINE const upb_FieldDef* upb_OneofDef_LookupName(const upb_OneofDef* o, const char* name) + { + return upb_OneofDef_LookupNameWithSize(o, name, strlen(name)); + } + const upb_FieldDef* upb_OneofDef_LookupNumber(const upb_OneofDef* o, uint32_t num); + +/* upb_MessageDef *************************************************************/ + +/* Well-known field tag numbers for map-entry messages. */ +#define kUpb_MapEntry_KeyFieldNumber 1 +#define kUpb_MapEntry_ValueFieldNumber 2 + +/* Well-known field tag numbers for Any messages. */ +#define kUpb_Any_TypeFieldNumber 1 +#define kUpb_Any_ValueFieldNumber 2 + +/* Well-known field tag numbers for duration messages. */ +#define kUpb_Duration_SecondsFieldNumber 1 +#define kUpb_Duration_NanosFieldNumber 2 + +/* Well-known field tag numbers for timestamp messages. 
*/ +#define kUpb_Timestamp_SecondsFieldNumber 1 +#define kUpb_Timestamp_NanosFieldNumber 2 + + const google_protobuf_MessageOptions* upb_MessageDef_Options( + const upb_MessageDef* m + ); + bool upb_MessageDef_HasOptions(const upb_MessageDef* m); + const char* upb_MessageDef_FullName(const upb_MessageDef* m); + const upb_FileDef* upb_MessageDef_File(const upb_MessageDef* m); + const upb_MessageDef* upb_MessageDef_ContainingType(const upb_MessageDef* m); + const char* upb_MessageDef_Name(const upb_MessageDef* m); + upb_Syntax upb_MessageDef_Syntax(const upb_MessageDef* m); + upb_WellKnown upb_MessageDef_WellKnownType(const upb_MessageDef* m); + int upb_MessageDef_ExtensionRangeCount(const upb_MessageDef* m); + int upb_MessageDef_FieldCount(const upb_MessageDef* m); + int upb_MessageDef_OneofCount(const upb_MessageDef* m); + const upb_ExtensionRange* upb_MessageDef_ExtensionRange(const upb_MessageDef* m, int i); + const upb_FieldDef* upb_MessageDef_Field(const upb_MessageDef* m, int i); + const upb_OneofDef* upb_MessageDef_Oneof(const upb_MessageDef* m, int i); + const upb_FieldDef* upb_MessageDef_FindFieldByNumber(const upb_MessageDef* m, uint32_t i); + const upb_FieldDef* upb_MessageDef_FindFieldByNameWithSize( + const upb_MessageDef* m, const char* name, size_t len + ); + const upb_OneofDef* upb_MessageDef_FindOneofByNameWithSize( + const upb_MessageDef* m, const char* name, size_t len + ); + const upb_MiniTable* upb_MessageDef_MiniTable(const upb_MessageDef* m); + + UPB_INLINE const upb_OneofDef* upb_MessageDef_FindOneofByName( + const upb_MessageDef* m, const char* name + ) + { + return upb_MessageDef_FindOneofByNameWithSize(m, name, strlen(name)); + } + + UPB_INLINE const upb_FieldDef* upb_MessageDef_FindFieldByName( + const upb_MessageDef* m, const char* name + ) + { + return upb_MessageDef_FindFieldByNameWithSize(m, name, strlen(name)); + } + + UPB_INLINE bool upb_MessageDef_IsMapEntry(const upb_MessageDef* m) + { + return 
google_protobuf_MessageOptions_map_entry(upb_MessageDef_Options(m)); + } + + UPB_INLINE bool upb_MessageDef_IsMessageSet(const upb_MessageDef* m) + { + return google_protobuf_MessageOptions_message_set_wire_format( + upb_MessageDef_Options(m) + ); + } + + /* Nested entities. */ + int upb_MessageDef_NestedMessageCount(const upb_MessageDef* m); + int upb_MessageDef_NestedEnumCount(const upb_MessageDef* m); + int upb_MessageDef_NestedExtensionCount(const upb_MessageDef* m); + const upb_MessageDef* upb_MessageDef_NestedMessage(const upb_MessageDef* m, int i); + const upb_EnumDef* upb_MessageDef_NestedEnum(const upb_MessageDef* m, int i); + const upb_FieldDef* upb_MessageDef_NestedExtension(const upb_MessageDef* m, int i); + + /* Lookup of either field or oneof by name. Returns whether either was found. + * If the return is true, then the found def will be set, and the non-found + * one set to NULL. */ + bool upb_MessageDef_FindByNameWithSize(const upb_MessageDef* m, const char* name, size_t len, const upb_FieldDef** f, const upb_OneofDef** o); + + UPB_INLINE bool upb_MessageDef_FindByName(const upb_MessageDef* m, const char* name, const upb_FieldDef** f, const upb_OneofDef** o) + { + return upb_MessageDef_FindByNameWithSize(m, name, strlen(name), f, o); + } + + /* Returns a field by either JSON name or regular proto name. 
*/ + const upb_FieldDef* upb_MessageDef_FindByJsonNameWithSize( + const upb_MessageDef* m, const char* name, size_t len + ); + UPB_INLINE const upb_FieldDef* upb_MessageDef_FindByJsonName( + const upb_MessageDef* m, const char* name + ) + { + return upb_MessageDef_FindByJsonNameWithSize(m, name, strlen(name)); + } + + /* upb_ExtensionRange *********************************************************/ + + const google_protobuf_ExtensionRangeOptions* upb_ExtensionRange_Options( + const upb_ExtensionRange* r + ); + bool upb_ExtensionRange_HasOptions(const upb_ExtensionRange* r); + int32_t upb_ExtensionRange_Start(const upb_ExtensionRange* r); + int32_t upb_ExtensionRange_End(const upb_ExtensionRange* r); + + /* upb_EnumDef ****************************************************************/ + + const google_protobuf_EnumOptions* upb_EnumDef_Options(const upb_EnumDef* e); + bool upb_EnumDef_HasOptions(const upb_EnumDef* e); + const char* upb_EnumDef_FullName(const upb_EnumDef* e); + const char* upb_EnumDef_Name(const upb_EnumDef* e); + const upb_FileDef* upb_EnumDef_File(const upb_EnumDef* e); + const upb_MessageDef* upb_EnumDef_ContainingType(const upb_EnumDef* e); + int32_t upb_EnumDef_Default(const upb_EnumDef* e); + int upb_EnumDef_ValueCount(const upb_EnumDef* e); + const upb_EnumValueDef* upb_EnumDef_Value(const upb_EnumDef* e, int i); + + const upb_EnumValueDef* upb_EnumDef_FindValueByNameWithSize( + const upb_EnumDef* e, const char* name, size_t len + ); + const upb_EnumValueDef* upb_EnumDef_FindValueByNumber(const upb_EnumDef* e, int32_t num); + bool upb_EnumDef_CheckNumber(const upb_EnumDef* e, int32_t num); + + // Convenience wrapper. 
+ UPB_INLINE const upb_EnumValueDef* upb_EnumDef_FindValueByName( + const upb_EnumDef* e, const char* name + ) + { + return upb_EnumDef_FindValueByNameWithSize(e, name, strlen(name)); + } + + /* upb_EnumValueDef ***********************************************************/ + + const google_protobuf_EnumValueOptions* upb_EnumValueDef_Options( + const upb_EnumValueDef* e + ); + bool upb_EnumValueDef_HasOptions(const upb_EnumValueDef* e); + const char* upb_EnumValueDef_FullName(const upb_EnumValueDef* e); + const char* upb_EnumValueDef_Name(const upb_EnumValueDef* e); + int32_t upb_EnumValueDef_Number(const upb_EnumValueDef* e); + uint32_t upb_EnumValueDef_Index(const upb_EnumValueDef* e); + const upb_EnumDef* upb_EnumValueDef_Enum(const upb_EnumValueDef* e); + + /* upb_FileDef ****************************************************************/ + + const google_protobuf_FileOptions* upb_FileDef_Options(const upb_FileDef* f); + bool upb_FileDef_HasOptions(const upb_FileDef* f); + const char* upb_FileDef_Name(const upb_FileDef* f); + const char* upb_FileDef_Package(const upb_FileDef* f); + upb_Syntax upb_FileDef_Syntax(const upb_FileDef* f); + int upb_FileDef_DependencyCount(const upb_FileDef* f); + int upb_FileDef_PublicDependencyCount(const upb_FileDef* f); + int upb_FileDef_WeakDependencyCount(const upb_FileDef* f); + int upb_FileDef_TopLevelMessageCount(const upb_FileDef* f); + int upb_FileDef_TopLevelEnumCount(const upb_FileDef* f); + int upb_FileDef_TopLevelExtensionCount(const upb_FileDef* f); + int upb_FileDef_ServiceCount(const upb_FileDef* f); + const upb_FileDef* upb_FileDef_Dependency(const upb_FileDef* f, int i); + const upb_FileDef* upb_FileDef_PublicDependency(const upb_FileDef* f, int i); + const upb_FileDef* upb_FileDef_WeakDependency(const upb_FileDef* f, int i); + const upb_MessageDef* upb_FileDef_TopLevelMessage(const upb_FileDef* f, int i); + const upb_EnumDef* upb_FileDef_TopLevelEnum(const upb_FileDef* f, int i); + const upb_FieldDef* 
upb_FileDef_TopLevelExtension(const upb_FileDef* f, int i); + const upb_ServiceDef* upb_FileDef_Service(const upb_FileDef* f, int i); + const upb_DefPool* upb_FileDef_Pool(const upb_FileDef* f); + const int32_t* _upb_FileDef_PublicDependencyIndexes(const upb_FileDef* f); + const int32_t* _upb_FileDef_WeakDependencyIndexes(const upb_FileDef* f); + + /* upb_MethodDef **************************************************************/ + + const google_protobuf_MethodOptions* upb_MethodDef_Options( + const upb_MethodDef* m + ); + bool upb_MethodDef_HasOptions(const upb_MethodDef* m); + const char* upb_MethodDef_FullName(const upb_MethodDef* m); + int upb_MethodDef_Index(const upb_MethodDef* m); + const char* upb_MethodDef_Name(const upb_MethodDef* m); + const upb_ServiceDef* upb_MethodDef_Service(const upb_MethodDef* m); + const upb_MessageDef* upb_MethodDef_InputType(const upb_MethodDef* m); + const upb_MessageDef* upb_MethodDef_OutputType(const upb_MethodDef* m); + bool upb_MethodDef_ClientStreaming(const upb_MethodDef* m); + bool upb_MethodDef_ServerStreaming(const upb_MethodDef* m); + + /* upb_ServiceDef *************************************************************/ + + const google_protobuf_ServiceOptions* upb_ServiceDef_Options( + const upb_ServiceDef* s + ); + bool upb_ServiceDef_HasOptions(const upb_ServiceDef* s); + const char* upb_ServiceDef_FullName(const upb_ServiceDef* s); + const char* upb_ServiceDef_Name(const upb_ServiceDef* s); + int upb_ServiceDef_Index(const upb_ServiceDef* s); + const upb_FileDef* upb_ServiceDef_File(const upb_ServiceDef* s); + int upb_ServiceDef_MethodCount(const upb_ServiceDef* s); + const upb_MethodDef* upb_ServiceDef_Method(const upb_ServiceDef* s, int i); + const upb_MethodDef* upb_ServiceDef_FindMethodByName(const upb_ServiceDef* s, const char* name); + + /* upb_DefPool ****************************************************************/ + + upb_DefPool* upb_DefPool_New(void); + void upb_DefPool_Free(upb_DefPool* s); + const 
upb_MessageDef* upb_DefPool_FindMessageByName(const upb_DefPool* s, const char* sym); + const upb_MessageDef* upb_DefPool_FindMessageByNameWithSize( + const upb_DefPool* s, const char* sym, size_t len + ); + const upb_EnumDef* upb_DefPool_FindEnumByName(const upb_DefPool* s, const char* sym); + const upb_EnumValueDef* upb_DefPool_FindEnumByNameval(const upb_DefPool* s, const char* sym); + const upb_FieldDef* upb_DefPool_FindExtensionByName(const upb_DefPool* s, const char* sym); + const upb_FieldDef* upb_DefPool_FindExtensionByNameWithSize( + const upb_DefPool* s, const char* sym, size_t len + ); + const upb_FileDef* upb_DefPool_FindFileByName(const upb_DefPool* s, const char* name); + const upb_ServiceDef* upb_DefPool_FindServiceByName(const upb_DefPool* s, const char* name); + const upb_ServiceDef* upb_DefPool_FindServiceByNameWithSize( + const upb_DefPool* s, const char* name, size_t size + ); + const upb_FileDef* upb_DefPool_FindFileContainingSymbol(const upb_DefPool* s, const char* name); + const upb_FileDef* upb_DefPool_FindFileByNameWithSize(const upb_DefPool* s, const char* name, size_t len); + const upb_FileDef* upb_DefPool_AddFile( + upb_DefPool* s, const google_protobuf_FileDescriptorProto* file, upb_Status* status + ); + size_t _upb_DefPool_BytesLoaded(const upb_DefPool* s); + upb_Arena* _upb_DefPool_Arena(const upb_DefPool* s); + const upb_FieldDef* _upb_DefPool_FindExtensionByMiniTable( + const upb_DefPool* s, const upb_MiniTable_Extension* ext + ); + const upb_FieldDef* upb_DefPool_FindExtensionByNumber(const upb_DefPool* s, const upb_MessageDef* m, int32_t fieldnum); + const upb_ExtensionRegistry* upb_DefPool_ExtensionRegistry( + const upb_DefPool* s + ); + const upb_FieldDef** upb_DefPool_GetAllExtensions(const upb_DefPool* s, const upb_MessageDef* m, size_t* count); + + /* For generated code only: loads a generated descriptor. */ + typedef struct _upb_DefPool_Init + { + struct _upb_DefPool_Init** deps; /* Dependencies of this file. 
*/ + const upb_MiniTable_File* layout; + const char* filename; + upb_StringView descriptor; /* Serialized descriptor. */ + } _upb_DefPool_Init; + + // Should only be directly called by tests. This variant lets us suppress + // the use of compiled-in tables, forcing a rebuild of the tables at runtime. + bool _upb_DefPool_LoadDefInitEx(upb_DefPool* s, const _upb_DefPool_Init* init, bool rebuild_minitable); + + UPB_INLINE bool _upb_DefPool_LoadDefInit(upb_DefPool* s, const _upb_DefPool_Init* init) + { + return _upb_DefPool_LoadDefInitEx(s, init, false); + } + +#include "upb/port_undef.inc" + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* UPB_DEF_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/def.hpp b/CAPI/cpp/grpc/include/upb/def.hpp new file mode 100644 index 00000000..ce7d9158 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/def.hpp @@ -0,0 +1,713 @@ +// Copyright (c) 2009-2021, Google LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Google LLC nor the +// names of its contributors may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef UPB_DEF_HPP_ +#define UPB_DEF_HPP_ + +#include +#include +#include +#include + +#include "upb/def.h" +#include "upb/reflection.h" +#include "upb/upb.hpp" + +namespace upb +{ + + typedef upb_MessageValue MessageValue; + + class EnumDefPtr; + class FileDefPtr; + class MessageDefPtr; + class OneofDefPtr; + + // A upb::FieldDefPtr describes a single field in a message. It is most often + // found as a part of a upb_MessageDef, but can also stand alone to represent + // an extension. 
+ class FieldDefPtr + { + public: + FieldDefPtr() : + ptr_(nullptr) + { + } + explicit FieldDefPtr(const upb_FieldDef* ptr) : + ptr_(ptr) + { + } + + const upb_FieldDef* ptr() const + { + return ptr_; + } + explicit operator bool() const + { + return ptr_ != nullptr; + } + + typedef upb_CType Type; + typedef upb_Label Label; + typedef upb_FieldType DescriptorType; + + const char* full_name() const + { + return upb_FieldDef_FullName(ptr_); + } + + Type type() const + { + return upb_FieldDef_CType(ptr_); + } + Label label() const + { + return upb_FieldDef_Label(ptr_); + } + const char* name() const + { + return upb_FieldDef_Name(ptr_); + } + const char* json_name() const + { + return upb_FieldDef_JsonName(ptr_); + } + uint32_t number() const + { + return upb_FieldDef_Number(ptr_); + } + bool is_extension() const + { + return upb_FieldDef_IsExtension(ptr_); + } + + // For non-string, non-submessage fields, this indicates whether binary + // protobufs are encoded in packed or non-packed format. + // + // Note: this accessor reflects the fact that "packed" has different defaults + // depending on whether the proto is proto2 or proto3. + bool packed() const + { + return upb_FieldDef_IsPacked(ptr_); + } + + // An integer that can be used as an index into an array of fields for + // whatever message this field belongs to. Guaranteed to be less than + // f->containing_type()->field_count(). May only be accessed once the def has + // been finalized. + uint32_t index() const + { + return upb_FieldDef_Index(ptr_); + } + + // The MessageDef to which this field belongs. + // + // If this field has been added to a MessageDef, that message can be retrieved + // directly (this is always the case for frozen FieldDefs). + // + // If the field has not yet been added to a MessageDef, you can set the name + // of the containing type symbolically instead. This is mostly useful for + // extensions, where the extension is declared separately from the message. 
+ MessageDefPtr containing_type() const; + + // The OneofDef to which this field belongs, or NULL if this field is not part + // of a oneof. + OneofDefPtr containing_oneof() const; + + // The field's type according to the enum in descriptor.proto. This is not + // the same as UPB_TYPE_*, because it distinguishes between (for example) + // INT32 and SINT32, whereas our "type" enum does not. This return of + // descriptor_type() is a function of type(), integer_format(), and + // is_tag_delimited(). + DescriptorType descriptor_type() const + { + return upb_FieldDef_Type(ptr_); + } + + // Convenient field type tests. + bool IsSubMessage() const + { + return upb_FieldDef_IsSubMessage(ptr_); + } + bool IsString() const + { + return upb_FieldDef_IsString(ptr_); + } + bool IsSequence() const + { + return upb_FieldDef_IsRepeated(ptr_); + } + bool IsPrimitive() const + { + return upb_FieldDef_IsPrimitive(ptr_); + } + bool IsMap() const + { + return upb_FieldDef_IsMap(ptr_); + } + + MessageValue default_value() const + { + return upb_FieldDef_Default(ptr_); + } + + // Returns the enum or submessage def for this field, if any. The field's + // type must match (ie. you may only call enum_subdef() for fields where + // type() == kUpb_CType_Enum). + EnumDefPtr enum_subdef() const; + MessageDefPtr message_subdef() const; + + private: + const upb_FieldDef* ptr_; + }; + + // Class that represents a oneof. + class OneofDefPtr + { + public: + OneofDefPtr() : + ptr_(nullptr) + { + } + explicit OneofDefPtr(const upb_OneofDef* ptr) : + ptr_(ptr) + { + } + + const upb_OneofDef* ptr() const + { + return ptr_; + } + explicit operator bool() const + { + return ptr_ != nullptr; + } + + // Returns the MessageDef that contains this OneofDef. + MessageDefPtr containing_type() const; + + // Returns the name of this oneof. + const char* name() const + { + return upb_OneofDef_Name(ptr_); + } + + // Returns the number of fields in the oneof. 
+ int field_count() const + { + return upb_OneofDef_FieldCount(ptr_); + } + FieldDefPtr field(int i) const + { + return FieldDefPtr(upb_OneofDef_Field(ptr_, i)); + } + + // Looks up by name. + FieldDefPtr FindFieldByName(const char* name, size_t len) const + { + return FieldDefPtr(upb_OneofDef_LookupNameWithSize(ptr_, name, len)); + } + FieldDefPtr FindFieldByName(const char* name) const + { + return FieldDefPtr(upb_OneofDef_LookupName(ptr_, name)); + } + + template + FieldDefPtr FindFieldByName(const T& str) const + { + return FindFieldByName(str.c_str(), str.size()); + } + + // Looks up by tag number. + FieldDefPtr FindFieldByNumber(uint32_t num) const + { + return FieldDefPtr(upb_OneofDef_LookupNumber(ptr_, num)); + } + + private: + const upb_OneofDef* ptr_; + }; + + // Structure that describes a single .proto message type. + class MessageDefPtr + { + public: + MessageDefPtr() : + ptr_(nullptr) + { + } + explicit MessageDefPtr(const upb_MessageDef* ptr) : + ptr_(ptr) + { + } + + const upb_MessageDef* ptr() const + { + return ptr_; + } + explicit operator bool() const + { + return ptr_ != nullptr; + } + + FileDefPtr file() const; + + const char* full_name() const + { + return upb_MessageDef_FullName(ptr_); + } + const char* name() const + { + return upb_MessageDef_Name(ptr_); + } + + // The number of fields that belong to the MessageDef. + int field_count() const + { + return upb_MessageDef_FieldCount(ptr_); + } + FieldDefPtr field(int i) const + { + return FieldDefPtr(upb_MessageDef_Field(ptr_, i)); + } + + // The number of oneofs that belong to the MessageDef. + int oneof_count() const + { + return upb_MessageDef_OneofCount(ptr_); + } + OneofDefPtr oneof(int i) const + { + return OneofDefPtr(upb_MessageDef_Oneof(ptr_, i)); + } + + upb_Syntax syntax() const + { + return upb_MessageDef_Syntax(ptr_); + } + + // These return null pointers if the field is not found. 
+ FieldDefPtr FindFieldByNumber(uint32_t number) const + { + return FieldDefPtr(upb_MessageDef_FindFieldByNumber(ptr_, number)); + } + FieldDefPtr FindFieldByName(const char* name, size_t len) const + { + return FieldDefPtr(upb_MessageDef_FindFieldByNameWithSize(ptr_, name, len)); + } + FieldDefPtr FindFieldByName(const char* name) const + { + return FieldDefPtr(upb_MessageDef_FindFieldByName(ptr_, name)); + } + + template + FieldDefPtr FindFieldByName(const T& str) const + { + return FindFieldByName(str.c_str(), str.size()); + } + + OneofDefPtr FindOneofByName(const char* name, size_t len) const + { + return OneofDefPtr(upb_MessageDef_FindOneofByNameWithSize(ptr_, name, len)); + } + + OneofDefPtr FindOneofByName(const char* name) const + { + return OneofDefPtr(upb_MessageDef_FindOneofByName(ptr_, name)); + } + + template + OneofDefPtr FindOneofByName(const T& str) const + { + return FindOneofByName(str.c_str(), str.size()); + } + + // Is this message a map entry? + bool mapentry() const + { + return upb_MessageDef_IsMapEntry(ptr_); + } + + // Return the type of well known type message. kUpb_WellKnown_Unspecified for + // non-well-known message. 
+ upb_WellKnown wellknowntype() const + { + return upb_MessageDef_WellKnownType(ptr_); + } + + private: + class FieldIter + { + public: + explicit FieldIter(const upb_MessageDef* m, int i) : + m_(m), + i_(i) + { + } + void operator++() + { + i_++; + } + + FieldDefPtr operator*() + { + return FieldDefPtr(upb_MessageDef_Field(m_, i_)); + } + bool operator!=(const FieldIter& other) + { + return i_ != other.i_; + } + bool operator==(const FieldIter& other) + { + return i_ == other.i_; + } + + private: + const upb_MessageDef* m_; + int i_; + }; + + class FieldAccessor + { + public: + explicit FieldAccessor(const upb_MessageDef* md) : + md_(md) + { + } + FieldIter begin() + { + return FieldIter(md_, 0); + } + FieldIter end() + { + return FieldIter(md_, upb_MessageDef_FieldCount(md_)); + } + + private: + const upb_MessageDef* md_; + }; + + class OneofIter + { + public: + explicit OneofIter(const upb_MessageDef* m, int i) : + m_(m), + i_(i) + { + } + void operator++() + { + i_++; + } + + OneofDefPtr operator*() + { + return OneofDefPtr(upb_MessageDef_Oneof(m_, i_)); + } + bool operator!=(const OneofIter& other) + { + return i_ != other.i_; + } + bool operator==(const OneofIter& other) + { + return i_ == other.i_; + } + + private: + const upb_MessageDef* m_; + int i_; + }; + + class OneofAccessor + { + public: + explicit OneofAccessor(const upb_MessageDef* md) : + md_(md) + { + } + OneofIter begin() + { + return OneofIter(md_, 0); + } + OneofIter end() + { + return OneofIter(md_, upb_MessageDef_OneofCount(md_)); + } + + private: + const upb_MessageDef* md_; + }; + + public: + FieldAccessor fields() const + { + return FieldAccessor(ptr()); + } + OneofAccessor oneofs() const + { + return OneofAccessor(ptr()); + } + + private: + const upb_MessageDef* ptr_; + }; + + class EnumValDefPtr + { + public: + EnumValDefPtr() : + ptr_(nullptr) + { + } + explicit EnumValDefPtr(const upb_EnumValueDef* ptr) : + ptr_(ptr) + { + } + + int32_t number() const + { + return 
upb_EnumValueDef_Number(ptr_); + } + const char* full_name() const + { + return upb_EnumValueDef_FullName(ptr_); + } + const char* name() const + { + return upb_EnumValueDef_Name(ptr_); + } + + private: + const upb_EnumValueDef* ptr_; + }; + + class EnumDefPtr + { + public: + EnumDefPtr() : + ptr_(nullptr) + { + } + explicit EnumDefPtr(const upb_EnumDef* ptr) : + ptr_(ptr) + { + } + + const upb_EnumDef* ptr() const + { + return ptr_; + } + explicit operator bool() const + { + return ptr_ != nullptr; + } + + const char* full_name() const + { + return upb_EnumDef_FullName(ptr_); + } + const char* name() const + { + return upb_EnumDef_Name(ptr_); + } + + // The value that is used as the default when no field default is specified. + // If not set explicitly, the first value that was added will be used. + // The default value must be a member of the enum. + // Requires that value_count() > 0. + int32_t default_value() const + { + return upb_EnumDef_Default(ptr_); + } + + // Returns the number of values currently defined in the enum. Note that + // multiple names can refer to the same number, so this may be greater than + // the total number of unique numbers. + int value_count() const + { + return upb_EnumDef_ValueCount(ptr_); + } + + // Lookups from name to integer, returning true if found. + EnumValDefPtr FindValueByName(const char* name) const + { + return EnumValDefPtr(upb_EnumDef_FindValueByName(ptr_, name)); + } + + // Finds the name corresponding to the given number, or NULL if none was + // found. If more than one name corresponds to this number, returns the + // first one that was added. + EnumValDefPtr FindValueByNumber(int32_t num) const + { + return EnumValDefPtr(upb_EnumDef_FindValueByNumber(ptr_, num)); + } + + private: + const upb_EnumDef* ptr_; + }; + + // Class that represents a .proto file with some things defined in it. + // + // Many users won't care about FileDefs, but they are necessary if you want to + // read the values of file-level options. 
+ class FileDefPtr + { + public: + explicit FileDefPtr(const upb_FileDef* ptr) : + ptr_(ptr) + { + } + + const upb_FileDef* ptr() const + { + return ptr_; + } + explicit operator bool() const + { + return ptr_ != nullptr; + } + + // Get/set name of the file (eg. "foo/bar.proto"). + const char* name() const + { + return upb_FileDef_Name(ptr_); + } + + // Package name for definitions inside the file (eg. "foo.bar"). + const char* package() const + { + return upb_FileDef_Package(ptr_); + } + + // Syntax for the file. Defaults to proto2. + upb_Syntax syntax() const + { + return upb_FileDef_Syntax(ptr_); + } + + // Get the list of dependencies from the file. These are returned in the + // order that they were added to the FileDefPtr. + int dependency_count() const + { + return upb_FileDef_DependencyCount(ptr_); + } + const FileDefPtr dependency(int index) const + { + return FileDefPtr(upb_FileDef_Dependency(ptr_, index)); + } + + private: + const upb_FileDef* ptr_; + }; + + // Non-const methods in upb::DefPool are NOT thread-safe. + class DefPool + { + public: + DefPool() : + ptr_(upb_DefPool_New(), upb_DefPool_Free) + { + } + explicit DefPool(upb_DefPool* s) : + ptr_(s, upb_DefPool_Free) + { + } + + const upb_DefPool* ptr() const + { + return ptr_.get(); + } + upb_DefPool* ptr() + { + return ptr_.get(); + } + + // Finds an entry in the symbol table with this exact name. If not found, + // returns NULL. + MessageDefPtr FindMessageByName(const char* sym) const + { + return MessageDefPtr(upb_DefPool_FindMessageByName(ptr_.get(), sym)); + } + + EnumDefPtr FindEnumByName(const char* sym) const + { + return EnumDefPtr(upb_DefPool_FindEnumByName(ptr_.get(), sym)); + } + + FileDefPtr FindFileByName(const char* name) const + { + return FileDefPtr(upb_DefPool_FindFileByName(ptr_.get(), name)); + } + + // TODO: iteration? + + // Adds the given serialized FileDescriptorProto to the pool. 
+ FileDefPtr AddFile(const google_protobuf_FileDescriptorProto* file_proto, Status* status) + { + return FileDefPtr( + upb_DefPool_AddFile(ptr_.get(), file_proto, status->ptr()) + ); + } + + private: + std::unique_ptr ptr_; + }; + + // TODO(b/236632406): This typedef is deprecated. Delete it. + using SymbolTable = DefPool; + + inline FileDefPtr MessageDefPtr::file() const + { + return FileDefPtr(upb_MessageDef_File(ptr_)); + } + + inline MessageDefPtr FieldDefPtr::message_subdef() const + { + return MessageDefPtr(upb_FieldDef_MessageSubDef(ptr_)); + } + + inline MessageDefPtr FieldDefPtr::containing_type() const + { + return MessageDefPtr(upb_FieldDef_ContainingType(ptr_)); + } + + inline MessageDefPtr OneofDefPtr::containing_type() const + { + return MessageDefPtr(upb_OneofDef_ContainingType(ptr_)); + } + + inline OneofDefPtr FieldDefPtr::containing_oneof() const + { + return OneofDefPtr(upb_FieldDef_ContainingOneof(ptr_)); + } + + inline EnumDefPtr FieldDefPtr::enum_subdef() const + { + return EnumDefPtr(upb_FieldDef_EnumSubDef(ptr_)); + } + +} // namespace upb + +#endif // UPB_DEF_HPP_ diff --git a/CAPI/cpp/grpc/include/upb/encode.h b/CAPI/cpp/grpc/include/upb/encode.h new file mode 100644 index 00000000..bf48c683 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/encode.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * upb_Encode: parsing from a upb_Message using a upb_MiniTable. + */ + +#ifndef UPB_ENCODE_H_ +#define UPB_ENCODE_H_ + +#include "upb/msg.h" + +/* Must be last. */ +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + enum + { + /* If set, the results of serializing will be deterministic across all + * instances of this binary. There are no guarantees across different + * binary builds. + * + * If your proto contains maps, the encoder will need to malloc()/free() + * memory during encode. */ + kUpb_EncodeOption_Deterministic = 1, + + /* When set, unknown fields are not printed. */ + kUpb_EncodeOption_SkipUnknown = 2, + + /* When set, the encode will fail if any required fields are missing. 
*/ + kUpb_EncodeOption_CheckRequired = 4, + }; + +#define UPB_ENCODE_MAXDEPTH(depth) ((depth) << 16) + + typedef enum + { + kUpb_EncodeStatus_Ok = 0, + kUpb_EncodeStatus_OutOfMemory = 1, // Arena alloc failed + kUpb_EncodeStatus_MaxDepthExceeded = 2, // Exceeded UPB_ENCODE_MAXDEPTH + + // kUpb_EncodeOption_CheckRequired failed but the parse otherwise succeeded. + kUpb_EncodeStatus_MissingRequired = 3, + } upb_EncodeStatus; + + upb_EncodeStatus upb_Encode(const void* msg, const upb_MiniTable* l, int options, upb_Arena* arena, char** buf, size_t* size); + +#include "upb/port_undef.inc" + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_ENCODE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/extension_registry.h b/CAPI/cpp/grpc/include/upb/extension_registry.h new file mode 100644 index 00000000..77071aa4 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/extension_registry.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_EXTENSION_REGISTRY_H_ +#define UPB_EXTENSION_REGISTRY_H_ + +#include + +#include "upb/upb.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /* Extension registry: a dynamic data structure that stores a map of: + * (upb_MiniTable, number) -> extension info + * + * upb_decode() uses upb_ExtensionRegistry to look up extensions while parsing + * binary format. + * + * upb_ExtensionRegistry is part of the mini-table (msglayout) family of + * objects. Like all mini-table objects, it is suitable for reflection-less + * builds that do not want to expose names into the binary. + * + * Unlike most mini-table types, upb_ExtensionRegistry requires dynamic memory + * allocation and dynamic initialization: + * * If reflection is being used, then upb_DefPool will construct an appropriate + * upb_ExtensionRegistry automatically. + * * For a mini-table only build, the user must manually construct the + * upb_ExtensionRegistry and populate it with all of the extensions the user + * cares about. + * * A third alternative is to manually unpack relevant extensions after the + * main parse is complete, similar to how Any works. This is perhaps the + * nicest solution from the perspective of reducing dependencies, avoiding + * dynamic memory allocation, and avoiding the need to parse uninteresting + * extensions. 
The downsides are: + * (1) parse errors are not caught during the main parse + * (2) the CPU hit of parsing comes during access, which could cause an + * undesirable stutter in application performance. + * + * Users cannot directly get or put into this map. Users can only add the + * extensions from a generated module and pass the extension registry to the + * binary decoder. + * + * A upb_DefPool provides a upb_ExtensionRegistry, so any users who use + * reflection do not need to populate a upb_ExtensionRegistry directly. + */ + + struct upb_ExtensionRegistry; + typedef struct upb_ExtensionRegistry upb_ExtensionRegistry; + + /* Creates a upb_ExtensionRegistry in the given arena. The arena must outlive + * any use of the extreg. */ + upb_ExtensionRegistry* upb_ExtensionRegistry_New(upb_Arena* arena); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_EXTENSION_REGISTRY_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/internal/decode.h b/CAPI/cpp/grpc/include/upb/internal/decode.h new file mode 100644 index 00000000..601bf60f --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/internal/decode.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Internal implementation details of the decoder that are shared between + * decode.c and decode_fast.c. + */ + +#ifndef UPB_INTERNAL_DECODE_H_ +#define UPB_INTERNAL_DECODE_H_ + +#include + +#include "upb/decode.h" +#include "upb/internal/upb.h" +#include "upb/msg_internal.h" +#include "third_party/utf8_range/utf8_range.h" + +/* Must be last. */ +#include "upb/port_def.inc" + +#define DECODE_NOGROUP (uint32_t) - 1 + +typedef struct upb_Decoder +{ + const char* end; /* Can read up to 16 bytes slop beyond this. */ + const char* limit_ptr; /* = end + UPB_MIN(limit, 0) */ + upb_Message* unknown_msg; /* If non-NULL, add unknown data at buffer flip. */ + const char* unknown; /* Start of unknown data. */ + const upb_ExtensionRegistry* + extreg; /* For looking up extensions during the parse. */ + int limit; /* Submessage limit relative to end. */ + int depth; /* Tracks recursion depth to bound stack usage. 
*/ + uint32_t end_group; /* field number of END_GROUP tag, else DECODE_NOGROUP */ + uint16_t options; + bool missing_required; + char patch[32]; + upb_Arena arena; + jmp_buf err; + +#ifndef NDEBUG + const char* debug_tagstart; + const char* debug_valstart; +#endif +} upb_Decoder; + +/* Error function that will abort decoding with longjmp(). We can't declare this + * UPB_NORETURN, even though it is appropriate, because if we do then compilers + * will "helpfully" refuse to tailcall to it + * (see: https://stackoverflow.com/a/55657013), which will defeat a major goal + * of our optimizations. That is also why we must declare it in a separate file, + * otherwise the compiler will see that it calls longjmp() and deduce that it is + * noreturn. */ +const char* fastdecode_err(upb_Decoder* d, int status); + +extern const uint8_t upb_utf8_offsets[]; + +UPB_INLINE +bool decode_verifyutf8_inl(const char* ptr, int len) +{ + const char* end = ptr + len; + + // Check 8 bytes at a time for any non-ASCII char. + while (end - ptr >= 8) + { + uint64_t data; + memcpy(&data, ptr, 8); + if (data & 0x8080808080808080) + goto non_ascii; + ptr += 8; + } + + // Check one byte at a time for non-ASCII. + while (ptr < end) + { + if (*ptr & 0x80) + goto non_ascii; + ptr++; + } + + return true; + +non_ascii: + return utf8_range2((const unsigned char*)ptr, end - ptr) == 0; +} + +const char* decode_checkrequired(upb_Decoder* d, const char* ptr, const upb_Message* msg, const upb_MiniTable* l); + +/* x86-64 pointers always have the high 16 bits matching. So we can shift + * left 8 and right 8 without loss of information. 
*/ +UPB_INLINE intptr_t decode_totable(const upb_MiniTable* tablep) +{ + return ((intptr_t)tablep << 8) | tablep->table_mask; +} + +UPB_INLINE const upb_MiniTable* decode_totablep(intptr_t table) +{ + return (const upb_MiniTable*)(table >> 8); +} + +UPB_INLINE +const char* decode_isdonefallback_inl(upb_Decoder* d, const char* ptr, int overrun, int* status) +{ + if (overrun < d->limit) + { + /* Need to copy remaining data into patch buffer. */ + UPB_ASSERT(overrun < 16); + if (d->unknown_msg) + { + if (!_upb_Message_AddUnknown(d->unknown_msg, d->unknown, ptr - d->unknown, &d->arena)) + { + *status = kUpb_DecodeStatus_OutOfMemory; + return NULL; + } + d->unknown = &d->patch[0] + overrun; + } + memset(d->patch + 16, 0, 16); + memcpy(d->patch, d->end, 16); + ptr = &d->patch[0] + overrun; + d->end = &d->patch[16]; + d->limit -= 16; + d->limit_ptr = d->end + d->limit; + d->options &= ~kUpb_DecodeOption_AliasString; + UPB_ASSERT(ptr < d->limit_ptr); + return ptr; + } + else + { + *status = kUpb_DecodeStatus_Malformed; + return NULL; + } +} + +const char* decode_isdonefallback(upb_Decoder* d, const char* ptr, int overrun); + +UPB_INLINE +bool decode_isdone(upb_Decoder* d, const char** ptr) +{ + int overrun = *ptr - d->end; + if (UPB_LIKELY(*ptr < d->limit_ptr)) + { + return false; + } + else if (UPB_LIKELY(overrun == d->limit)) + { + return true; + } + else + { + *ptr = decode_isdonefallback(d, *ptr, overrun); + return false; + } +} + +#if UPB_FASTTABLE +UPB_INLINE +const char* fastdecode_tagdispatch(upb_Decoder* d, const char* ptr, upb_Message* msg, intptr_t table, uint64_t hasbits, uint64_t tag) +{ + const upb_MiniTable* table_p = decode_totablep(table); + uint8_t mask = table; + uint64_t data; + size_t idx = tag & mask; + UPB_ASSUME((idx & 7) == 0); + idx >>= 3; + data = table_p->fasttable[idx].field_data ^ tag; + UPB_MUSTTAIL return table_p->fasttable[idx].field_parser(d, ptr, msg, table, hasbits, data); +} +#endif + +UPB_INLINE uint32_t fastdecode_loadtag(const char* 
ptr) +{ + uint16_t tag; + memcpy(&tag, ptr, 2); + return tag; +} + +UPB_INLINE void decode_checklimit(upb_Decoder* d) +{ + UPB_ASSERT(d->limit_ptr == d->end + UPB_MIN(0, d->limit)); +} + +UPB_INLINE int decode_pushlimit(upb_Decoder* d, const char* ptr, int size) +{ + int limit = size + (int)(ptr - d->end); + int delta = d->limit - limit; + decode_checklimit(d); + d->limit = limit; + d->limit_ptr = d->end + UPB_MIN(0, limit); + decode_checklimit(d); + return delta; +} + +UPB_INLINE void decode_poplimit(upb_Decoder* d, const char* ptr, int saved_delta) +{ + UPB_ASSERT(ptr - d->end == d->limit); + decode_checklimit(d); + d->limit += saved_delta; + d->limit_ptr = d->end + UPB_MIN(0, d->limit); + decode_checklimit(d); +} + +#include "upb/port_undef.inc" + +#endif /* UPB_INTERNAL_DECODE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/internal/mini_table_accessors.h b/CAPI/cpp/grpc/include/upb/internal/mini_table_accessors.h new file mode 100644 index 00000000..a7f01085 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/internal/mini_table_accessors.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2009-2022, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_INTERNAL_MINI_TABLE_ACCESSORS_H_ +#define UPB_INTERNAL_MINI_TABLE_ACCESSORS_H_ + +#include "upb/msg_internal.h" + +// Must be last. +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + UPB_INLINE bool _upb_MiniTable_Field_InOneOf(const upb_MiniTable_Field* field) + { + return field->presence < 0; + } + + UPB_INLINE void _upb_MiniTable_SetPresence(upb_Message* msg, const upb_MiniTable_Field* field) + { + if (field->presence > 0) + { + _upb_sethas_field(msg, field); + } + else if (_upb_MiniTable_Field_InOneOf(field)) + { + *_upb_oneofcase_field(msg, field) = field->number; + } + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif // UPB_INTERNAL_MINI_TABLE_ACCESSORS_H_ diff --git a/CAPI/cpp/grpc/include/upb/internal/table.h b/CAPI/cpp/grpc/include/upb/internal/table.h new file mode 100644 index 00000000..ee97e41b --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/internal/table.h @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * upb_table + * + * This header is INTERNAL-ONLY! Its interfaces are not public or stable! + * This file defines very fast int->upb_value (inttable) and string->upb_value + * (strtable) hash tables. + * + * The table uses chained scatter with Brent's variation (inspired by the Lua + * implementation of hash tables). The hash function for strings is Austin + * Appleby's "MurmurHash." 
+ * + * The inttable uses uintptr_t as its key, which guarantees it can be used to + * store pointers or integers of at least 32 bits (upb isn't really useful on + * systems where sizeof(void*) < 4). + * + * The table must be homogeneous (all values of the same type). In debug + * mode, we check this on insert and lookup. + */ + +#ifndef UPB_INTERNAL_TABLE_H_ +#define UPB_INTERNAL_TABLE_H_ + +#include +#include + +#include "upb/upb.h" + +// Must be last. +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /* upb_value ******************************************************************/ + + typedef struct + { + uint64_t val; + } upb_value; + + /* Variant that works with a length-delimited rather than NULL-delimited string, + * as supported by strtable. */ + char* upb_strdup2(const char* s, size_t len, upb_Arena* a); + + UPB_INLINE void _upb_value_setval(upb_value* v, uint64_t val) + { + v->val = val; + } + +/* For each value ctype, define the following set of functions: + * + * // Get/set an int32 from a upb_value. + * int32_t upb_value_getint32(upb_value val); + * void upb_value_setint32(upb_value *val, int32_t cval); + * + * // Construct a new upb_value from an int32. 
+ * upb_value upb_value_int32(int32_t val); */ +#define FUNCS(name, membername, type_t, converter, proto_type) \ + UPB_INLINE void upb_value_set##name(upb_value* val, type_t cval) \ + { \ + val->val = (converter)cval; \ + } \ + UPB_INLINE upb_value upb_value_##name(type_t val) \ + { \ + upb_value ret; \ + upb_value_set##name(&ret, val); \ + return ret; \ + } \ + UPB_INLINE type_t upb_value_get##name(upb_value val) \ + { \ + return (type_t)(converter)val.val; \ + } + + FUNCS(int32, int32, int32_t, int32_t, UPB_CTYPE_INT32) + FUNCS(int64, int64, int64_t, int64_t, UPB_CTYPE_INT64) + FUNCS(uint32, uint32, uint32_t, uint32_t, UPB_CTYPE_UINT32) + FUNCS(uint64, uint64, uint64_t, uint64_t, UPB_CTYPE_UINT64) + FUNCS(bool, _bool, bool, bool, UPB_CTYPE_BOOL) + FUNCS(cstr, cstr, char*, uintptr_t, UPB_CTYPE_CSTR) + FUNCS(ptr, ptr, void*, uintptr_t, UPB_CTYPE_PTR) + FUNCS(constptr, constptr, const void*, uintptr_t, UPB_CTYPE_CONSTPTR) + +#undef FUNCS + + UPB_INLINE void upb_value_setfloat(upb_value* val, float cval) + { + memcpy(&val->val, &cval, sizeof(cval)); + } + + UPB_INLINE void upb_value_setdouble(upb_value* val, double cval) + { + memcpy(&val->val, &cval, sizeof(cval)); + } + + UPB_INLINE upb_value upb_value_float(float cval) + { + upb_value ret; + upb_value_setfloat(&ret, cval); + return ret; + } + + UPB_INLINE upb_value upb_value_double(double cval) + { + upb_value ret; + upb_value_setdouble(&ret, cval); + return ret; + } + +#undef SET_TYPE + + /* upb_tabkey *****************************************************************/ + + /* Either: + * 1. an actual integer key, or + * 2. a pointer to a string prefixed by its uint32_t length, owned by us. + * + * ...depending on whether this is a string table or an int table. We would + * make this a union of those two types, but C89 doesn't support statically + * initializing a non-first union member. 
*/ + typedef uintptr_t upb_tabkey; + + UPB_INLINE char* upb_tabstr(upb_tabkey key, uint32_t* len) + { + char* mem = (char*)key; + if (len) + memcpy(len, mem, sizeof(*len)); + return mem + sizeof(*len); + } + + UPB_INLINE upb_StringView upb_tabstrview(upb_tabkey key) + { + upb_StringView ret; + uint32_t len; + ret.data = upb_tabstr(key, &len); + ret.size = len; + return ret; + } + + /* upb_tabval *****************************************************************/ + + typedef struct upb_tabval + { + uint64_t val; + } upb_tabval; + +#define UPB_TABVALUE_EMPTY_INIT \ + { \ + -1 \ + } + + /* upb_table ******************************************************************/ + + typedef struct _upb_tabent + { + upb_tabkey key; + upb_tabval val; + + /* Internal chaining. This is const so we can create static initializers for + * tables. We cast away const sometimes, but *only* when the containing + * upb_table is known to be non-const. This requires a bit of care, but + * the subtlety is confined to table.c. */ + const struct _upb_tabent* next; + } upb_tabent; + + typedef struct + { + size_t count; /* Number of entries in the hash part. */ + uint32_t mask; /* Mask to turn hash value -> bucket. */ + uint32_t max_count; /* Max count before we hit our load limit. */ + uint8_t size_lg2; /* Size of the hashtable part is 2^size_lg2 entries. */ + upb_tabent* entries; + } upb_table; + + typedef struct + { + upb_table t; + } upb_strtable; + + typedef struct + { + upb_table t; /* For entries that don't fit in the array part. */ + const upb_tabval* array; /* Array part of the table. See const note above. */ + size_t array_size; /* Array part size. */ + size_t array_count; /* Array part number of elements. */ + } upb_inttable; + + UPB_INLINE size_t upb_table_size(const upb_table* t) + { + if (t->size_lg2 == 0) + return 0; + else + return 1 << t->size_lg2; + } + + /* Internal-only functions, in .h file only out of necessity. 
*/ + UPB_INLINE bool upb_tabent_isempty(const upb_tabent* e) + { + return e->key == 0; + } + + /* Initialize and uninitialize a table, respectively. If memory allocation + * failed, false is returned that the table is uninitialized. */ + bool upb_inttable_init(upb_inttable* table, upb_Arena* a); + bool upb_strtable_init(upb_strtable* table, size_t expected_size, upb_Arena* a); + + /* Returns the number of values in the table. */ + size_t upb_inttable_count(const upb_inttable* t); + UPB_INLINE size_t upb_strtable_count(const upb_strtable* t) + { + return t->t.count; + } + + void upb_strtable_clear(upb_strtable* t); + + /* Inserts the given key into the hashtable with the given value. The key must + * not already exist in the hash table. For strtables, the key is not required + * to be NULL-terminated, and the table will make an internal copy of the key. + * Inttables must not insert a value of UINTPTR_MAX. + * + * If a table resize was required but memory allocation failed, false is + * returned and the table is unchanged. */ + bool upb_inttable_insert(upb_inttable* t, uintptr_t key, upb_value val, upb_Arena* a); + bool upb_strtable_insert(upb_strtable* t, const char* key, size_t len, upb_value val, upb_Arena* a); + + /* Looks up key in this table, returning "true" if the key was found. + * If v is non-NULL, copies the value for this key into *v. */ + bool upb_inttable_lookup(const upb_inttable* t, uintptr_t key, upb_value* v); + bool upb_strtable_lookup2(const upb_strtable* t, const char* key, size_t len, upb_value* v); + + /* For NULL-terminated strings. */ + UPB_INLINE bool upb_strtable_lookup(const upb_strtable* t, const char* key, upb_value* v) + { + return upb_strtable_lookup2(t, key, strlen(key), v); + } + + /* Removes an item from the table. Returns true if the remove was successful, + * and stores the removed item in *val if non-NULL. 
*/ + bool upb_inttable_remove(upb_inttable* t, uintptr_t key, upb_value* val); + bool upb_strtable_remove2(upb_strtable* t, const char* key, size_t len, upb_value* val); + + UPB_INLINE bool upb_strtable_remove(upb_strtable* t, const char* key, upb_value* v) + { + return upb_strtable_remove2(t, key, strlen(key), v); + } + + /* Updates an existing entry in an inttable. If the entry does not exist, + * returns false and does nothing. Unlike insert/remove, this does not + * invalidate iterators. */ + bool upb_inttable_replace(upb_inttable* t, uintptr_t key, upb_value val); + + /* Optimizes the table for the current set of entries, for both memory use and + * lookup time. Client should call this after all entries have been inserted; + * inserting more entries is legal, but will likely require a table resize. */ + void upb_inttable_compact(upb_inttable* t, upb_Arena* a); + + /* Exposed for testing only. */ + bool upb_strtable_resize(upb_strtable* t, size_t size_lg2, upb_Arena* a); + + /* Iterators ******************************************************************/ + + /* Iteration over inttable. + * + * intptr_t iter = UPB_INTTABLE_BEGIN; + * uintptr_t key; + * upb_value val; + * while (upb_inttable_next2(t, &key, &val, &iter)) { + * // ... + * } + */ + +#define UPB_INTTABLE_BEGIN -1 + + bool upb_inttable_next2(const upb_inttable* t, uintptr_t* key, upb_value* val, intptr_t* iter); + void upb_inttable_removeiter(upb_inttable* t, intptr_t* iter); + + /* Iteration over strtable. + * + * intptr_t iter = UPB_INTTABLE_BEGIN; + * upb_StringView key; + * upb_value val; + * while (upb_strtable_next2(t, &key, &val, &iter)) { + * // ... + * } + */ + +#define UPB_STRTABLE_BEGIN -1 + + bool upb_strtable_next2(const upb_strtable* t, upb_StringView* key, upb_value* val, intptr_t* iter); + void upb_strtable_removeiter(upb_strtable* t, intptr_t* iter); + + /* DEPRECATED iterators, slated for removal. + * + * Iterators for int and string tables. 
We are subject to some kind of unusual + * design constraints: + * + * For high-level languages: + * - we must be able to guarantee that we don't crash or corrupt memory even if + * the program accesses an invalidated iterator. + * + * For C++11 range-based for: + * - iterators must be copyable + * - iterators must be comparable + * - it must be possible to construct an "end" value. + * + * Iteration order is undefined. + * + * Modifying the table invalidates iterators. upb_{str,int}table_done() is + * guaranteed to work even on an invalidated iterator, as long as the table it + * is iterating over has not been freed. Calling next() or accessing data from + * an invalidated iterator yields unspecified elements from the table, but it is + * guaranteed not to crash and to return real table elements (except when done() + * is true). */ + + /* upb_strtable_iter **********************************************************/ + + /* upb_strtable_iter i; + * upb_strtable_begin(&i, t); + * for(; !upb_strtable_done(&i); upb_strtable_next(&i)) { + * const char *key = upb_strtable_iter_key(&i); + * const upb_value val = upb_strtable_iter_value(&i); + * // ... 
+ * } + */ + + typedef struct + { + const upb_strtable* t; + size_t index; + } upb_strtable_iter; + + void upb_strtable_begin(upb_strtable_iter* i, const upb_strtable* t); + void upb_strtable_next(upb_strtable_iter* i); + bool upb_strtable_done(const upb_strtable_iter* i); + upb_StringView upb_strtable_iter_key(const upb_strtable_iter* i); + upb_value upb_strtable_iter_value(const upb_strtable_iter* i); + void upb_strtable_iter_setdone(upb_strtable_iter* i); + bool upb_strtable_iter_isequal(const upb_strtable_iter* i1, const upb_strtable_iter* i2); + + /* upb_inttable_iter **********************************************************/ + + /* upb_inttable_iter i; + * upb_inttable_begin(&i, t); + * for(; !upb_inttable_done(&i); upb_inttable_next(&i)) { + * uintptr_t key = upb_inttable_iter_key(&i); + * upb_value val = upb_inttable_iter_value(&i); + * // ... + * } + */ + + typedef struct + { + const upb_inttable* t; + size_t index; + bool array_part; + } upb_inttable_iter; + + UPB_INLINE const upb_tabent* str_tabent(const upb_strtable_iter* i) + { + return &i->t->t.entries[i->index]; + } + + void upb_inttable_begin(upb_inttable_iter* i, const upb_inttable* t); + void upb_inttable_next(upb_inttable_iter* i); + bool upb_inttable_done(const upb_inttable_iter* i); + uintptr_t upb_inttable_iter_key(const upb_inttable_iter* i); + upb_value upb_inttable_iter_value(const upb_inttable_iter* i); + void upb_inttable_iter_setdone(upb_inttable_iter* i); + bool upb_inttable_iter_isequal(const upb_inttable_iter* i1, const upb_inttable_iter* i2); + + uint32_t _upb_Hash(const void* p, size_t n, uint64_t seed); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_INTERNAL_TABLE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/internal/upb.h b/CAPI/cpp/grpc/include/upb/internal/upb.h new file mode 100644 index 00000000..666772d3 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/internal/upb.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2009-2021, Google 
LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_INTERNAL_UPB_H_ +#define UPB_INTERNAL_UPB_H_ + +#include "upb/upb.h" + +struct mem_block; +typedef struct mem_block mem_block; + +struct upb_Arena +{ + _upb_ArenaHead head; + /* Stores cleanup metadata for this arena. + * - a pointer to the current cleanup counter. + * - a boolean indicating if there is an unowned initial block. */ + uintptr_t cleanup_metadata; + + /* Allocator to allocate arena blocks. We are responsible for freeing these + * when we are destroyed. 
*/ + upb_alloc* block_alloc; + uint32_t last_size; + + /* When multiple arenas are fused together, each arena points to a parent + * arena (root points to itself). The root tracks how many live arenas + * reference it. */ + uint32_t refcount; /* Only used when a->parent == a */ + struct upb_Arena* parent; + + /* Linked list of blocks to free/cleanup. */ + mem_block *freelist, *freelist_tail; +}; + +// Encodes a float or double that is round-trippable, but as short as possible. +// These routines are not fully optimal (not guaranteed to be shortest), but are +// short-ish and match the implementation that has been used in protobuf since +// the beginning. +// +// The given buffer size must be at least kUpb_RoundTripBufferSize. +enum +{ + kUpb_RoundTripBufferSize = 32 +}; +void _upb_EncodeRoundTripDouble(double val, char* buf, size_t size); +void _upb_EncodeRoundTripFloat(float val, char* buf, size_t size); + +#endif /* UPB_INTERNAL_UPB_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/internal/vsnprintf_compat.h b/CAPI/cpp/grpc/include/upb/internal/vsnprintf_compat.h new file mode 100644 index 00000000..55fbfdff --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/internal/vsnprintf_compat.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_INTERNAL_VSNPRINTF_COMPAT_H_ +#define UPB_INTERNAL_VSNPRINTF_COMPAT_H_ + +#include + +// Must be last. +#include "upb/port_def.inc" + +UPB_INLINE int _upb_vsnprintf(char* buf, size_t size, const char* fmt, va_list ap) +{ +#if defined(__MINGW64__) || defined(__MINGW32__) || defined(_MSC_VER) + // The msvc runtime has a non-conforming vsnprintf() that requires the + // following compatibility code to become conformant. + int n = -1; + if (size != 0) + n = _vsnprintf_s(buf, size, _TRUNCATE, fmt, ap); + if (n == -1) + n = _vscprintf(fmt, ap); + return n; +#else + return vsnprintf(buf, size, fmt, ap); +#endif +} + +#include "upb/port_undef.inc" + +#endif // UPB_INTERNAL_VSNPRINTF_COMPAT_H_ diff --git a/CAPI/cpp/grpc/include/upb/json_decode.h b/CAPI/cpp/grpc/include/upb/json_decode.h new file mode 100644 index 00000000..e70f28c6 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/json_decode.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef UPB_JSONDECODE_H_ +#define UPB_JSONDECODE_H_ + +#include "upb/def.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + enum + { + upb_JsonDecode_IgnoreUnknown = 1 + }; + + bool upb_JsonDecode(const char* buf, size_t size, upb_Message* msg, const upb_MessageDef* m, const upb_DefPool* symtab, int options, upb_Arena* arena, upb_Status* status); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_JSONDECODE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/json_encode.h b/CAPI/cpp/grpc/include/upb/json_encode.h new file mode 100644 index 00000000..ec795ea9 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/json_encode.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_JSONENCODE_H_ +#define UPB_JSONENCODE_H_ + +#include "upb/def.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + enum + { + /* When set, emits 0/default values. TODO(haberman): proto3 only? */ + upb_JsonEncode_EmitDefaults = 1 << 0, + + /* When set, use normal (snake_case) field names instead of JSON (camelCase) + names. */ + upb_JsonEncode_UseProtoNames = 1 << 1, + + /* When set, emits enums as their integer values instead of as their names. */ + upb_JsonEncode_FormatEnumsAsIntegers = 1 << 2 + }; + + /* Encodes the given |msg| to JSON format. The message's reflection is given in + * |m|. The symtab in |symtab| is used to find extensions (if NULL, extensions + * will not be printed). + * + * Output is placed in the given buffer, and always NULL-terminated. The output + * size (excluding NULL) is returned. This means that a return value >= |size| + * implies that the output was truncated. (These are the same semantics as + * snprintf()). */ + size_t upb_JsonEncode(const upb_Message* msg, const upb_MessageDef* m, const upb_DefPool* ext_pool, int options, char* buf, size_t size, upb_Status* status); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_JSONENCODE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/map.h b/CAPI/cpp/grpc/include/upb/map.h new file mode 100644 index 00000000..e8c078d8 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/map.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_MAP_H_ +#define UPB_MAP_H_ + +#include "google/protobuf/descriptor.upb.h" +#include "upb/message_value.h" + +// Must be last. +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /* Creates a new map on the given arena with the given key/value size. */ + upb_Map* upb_Map_New(upb_Arena* a, upb_CType key_type, upb_CType value_type); + + /* Returns the number of entries in the map. 
*/ + size_t upb_Map_Size(const upb_Map* map); + + /* Stores a value for the given key into |*val| (or the zero value if the key is + * not present). Returns whether the key was present. The |val| pointer may be + * NULL, in which case the function tests whether the given key is present. */ + bool upb_Map_Get(const upb_Map* map, upb_MessageValue key, upb_MessageValue* val); + + /* Removes all entries in the map. */ + void upb_Map_Clear(upb_Map* map); + + typedef enum + { + // LINT.IfChange + kUpb_MapInsertStatus_Inserted = 0, + kUpb_MapInsertStatus_Replaced = 1, + kUpb_MapInsertStatus_OutOfMemory = 2, + // LINT.ThenChange(//depot/google3/third_party/upb/upb/msg_internal.h) + } upb_MapInsertStatus; + + /* Sets the given key to the given value, returning whether the key was inserted + * or replaced. If the key was inserted, then any existing iterators will be + * invalidated. */ + upb_MapInsertStatus upb_Map_Insert(upb_Map* map, upb_MessageValue key, upb_MessageValue val, upb_Arena* arena); + + /* Sets the given key to the given value. Returns false if memory allocation + * failed. If the key is newly inserted, then any existing iterators will be + * invalidated. */ + UPB_INLINE bool upb_Map_Set(upb_Map* map, upb_MessageValue key, upb_MessageValue val, upb_Arena* arena) + { + return upb_Map_Insert(map, key, val, arena) != + kUpb_MapInsertStatus_OutOfMemory; + } + + /* Deletes this key from the table. Returns true if the key was present. */ + bool upb_Map_Delete(upb_Map* map, upb_MessageValue key); + + /* Map iteration: + * + * size_t iter = kUpb_Map_Begin; + * while (upb_MapIterator_Next(map, &iter)) { + * upb_MessageValue key = upb_MapIterator_Key(map, iter); + * upb_MessageValue val = upb_MapIterator_Value(map, iter); + * + * // If mutating is desired. + * upb_MapIterator_SetValue(map, iter, value2); + * } + */ + + /* Advances to the next entry. Returns false if no more entries are present. 
*/ + bool upb_MapIterator_Next(const upb_Map* map, size_t* iter); + + /* Returns true if the iterator still points to a valid entry, or false if the + * iterator is past the last element. It is an error to call this function with + * kUpb_Map_Begin (you must call next() at least once first). */ + bool upb_MapIterator_Done(const upb_Map* map, size_t iter); + + /* Returns the key and value for this entry of the map. */ + upb_MessageValue upb_MapIterator_Key(const upb_Map* map, size_t iter); + upb_MessageValue upb_MapIterator_Value(const upb_Map* map, size_t iter); + + /* Sets the value for this entry. The iterator must not be done, and the + * iterator must not have been initialized const. */ + void upb_MapIterator_SetValue(upb_Map* map, size_t iter, upb_MessageValue value); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_MAP_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/message_value.h b/CAPI/cpp/grpc/include/upb/message_value.h new file mode 100644 index 00000000..5df4d064 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/message_value.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_MESSAGE_VALUE_H_ +#define UPB_MESSAGE_VALUE_H_ + +#include "google/protobuf/descriptor.upb.h" +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + // Definitions common to both upb_Array and upb_Map. + + typedef union + { + bool bool_val; + float float_val; + double double_val; + int32_t int32_val; + int64_t int64_val; + uint32_t uint32_val; + uint64_t uint64_val; + const upb_Map* map_val; + const upb_Message* msg_val; + const upb_Array* array_val; + upb_StringView str_val; + } upb_MessageValue; + + typedef union + { + upb_Map* map; + upb_Message* msg; + upb_Array* array; + } upb_MutableMessageValue; + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_MESSAGE_VALUE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/mini_descriptor.h b/CAPI/cpp/grpc/include/upb/mini_descriptor.h new file mode 100644 index 00000000..de751853 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/mini_descriptor.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2009-2022, Google LLC + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_MINI_DESCRIPTOR_H_ +#define UPB_MINI_DESCRIPTOR_H_ + +#include "upb/def.h" +#include "upb/upb.h" + +// Must be last. 
+#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /** upb_MiniDescriptor ********************************************************/ + + upb_StringView upb_MiniDescriptor_EncodeEnum(const upb_EnumDef* enum_def, upb_Arena* a); + + upb_StringView upb_MiniDescriptor_EncodeExtension(const upb_FieldDef* field_def, upb_Arena* a); + + upb_StringView upb_MiniDescriptor_EncodeMessage( + const upb_MessageDef* message_def, upb_Arena* a + ); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_MINI_DESCRIPTOR_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/mini_table.h b/CAPI/cpp/grpc/include/upb/mini_table.h new file mode 100644 index 00000000..9e2b2aa8 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/mini_table.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2009-2022, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_MINI_TABLE_H_ +#define UPB_MINI_TABLE_H_ + +#include "upb/msg_internal.h" + +// Must be last. +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + const upb_MiniTable_Field* upb_MiniTable_FindFieldByNumber( + const upb_MiniTable* table, uint32_t number + ); + + UPB_INLINE const upb_MiniTable* upb_MiniTable_GetSubMessageTable( + const upb_MiniTable* mini_table, const upb_MiniTable_Field* field + ) + { + return mini_table->subs[field->submsg_index].submsg; + } + + UPB_INLINE bool upb_MiniTable_Enum_CheckValue(const upb_MiniTable_Enum* e, int32_t val) + { + uint32_t uval = (uint32_t)val; + if (uval < 64) + return e->mask & (1ULL << uval); + // OPT: binary search long lists? + int n = e->value_count; + for (int i = 0; i < n; i++) + { + if (e->values[i] == val) + return true; + } + return false; + } + + /** upb_MtDataEncoder *********************************************************/ + + // Functions to encode a string in a format that can be loaded by + // upb_MiniTable_Build(). 
+ + typedef enum + { + kUpb_MessageModifier_ValidateUtf8 = 1 << 0, + kUpb_MessageModifier_DefaultIsPacked = 1 << 1, + kUpb_MessageModifier_IsExtendable = 1 << 2, + } kUpb_MessageModifier; + + typedef enum + { + kUpb_FieldModifier_IsRepeated = 1 << 0, + kUpb_FieldModifier_IsPacked = 1 << 1, + kUpb_FieldModifier_IsClosedEnum = 1 << 2, + kUpb_FieldModifier_IsProto3Singular = 1 << 3, + kUpb_FieldModifier_IsRequired = 1 << 4, + } kUpb_FieldModifier; + + typedef struct + { + char* end; // Limit of the buffer passed as a parameter. + // Aliased to internal-only members in .cc. + char internal[32]; + } upb_MtDataEncoder; + +// If the input buffer has at least this many bytes available, the encoder call +// is guaranteed to succeed (as long as field number order is maintained). +#define kUpb_MtDataEncoder_MinSize 16 + + // Encodes field/oneof information for a given message. The sequence of calls + // should look like: + // + // upb_MtDataEncoder e; + // char buf[256]; + // char* ptr = buf; + // e.end = ptr + sizeof(buf); + // ptr = upb_MtDataEncoder_StartMessage(&e, ptr); + // // Fields *must* be in field number order. + // ptr = upb_MtDataEncoder_PutField(&e, ptr, ...); + // ptr = upb_MtDataEncoder_PutField(&e, ptr, ...); + // ptr = upb_MtDataEncoder_PutField(&e, ptr, ...); + // + // // If oneofs are present. Oneofs must be encoded after regular fields. + // ptr = upb_MiniTable_StartOneof(&e, ptr) + // ptr = upb_MiniTable_PutOneofField(&e, ptr, ...); + // ptr = upb_MiniTable_PutOneofField(&e, ptr, ...); + // + // ptr = upb_MiniTable_StartOneof(&e, ptr); + // ptr = upb_MiniTable_PutOneofField(&e, ptr, ...); + // ptr = upb_MiniTable_PutOneofField(&e, ptr, ...); + // + // Oneofs must be encoded after all regular fields. 
+ char* upb_MtDataEncoder_StartMessage(upb_MtDataEncoder* e, char* ptr, uint64_t msg_mod); + char* upb_MtDataEncoder_PutField(upb_MtDataEncoder* e, char* ptr, upb_FieldType type, uint32_t field_num, uint64_t field_mod); + char* upb_MtDataEncoder_StartOneof(upb_MtDataEncoder* e, char* ptr); + char* upb_MtDataEncoder_PutOneofField(upb_MtDataEncoder* e, char* ptr, uint32_t field_num); + + // Encodes the set of values for a given enum. The values must be given in + // order (after casting to uint32_t), and repeats are not allowed. + void upb_MtDataEncoder_StartEnum(upb_MtDataEncoder* e); + char* upb_MtDataEncoder_PutEnumValue(upb_MtDataEncoder* e, char* ptr, uint32_t val); + char* upb_MtDataEncoder_EndEnum(upb_MtDataEncoder* e, char* ptr); + + /** upb_MiniTable *************************************************************/ + + typedef enum + { + kUpb_MiniTablePlatform_32Bit, + kUpb_MiniTablePlatform_64Bit, + kUpb_MiniTablePlatform_Native = + UPB_SIZE(kUpb_MiniTablePlatform_32Bit, kUpb_MiniTablePlatform_64Bit), + } upb_MiniTablePlatform; + + // Builds a mini table from the data encoded in the buffer [data, len]. If any + // errors occur, returns NULL and sets a status message. In the success case, + // the caller must call upb_MiniTable_SetSub*() for all message or proto2 enum + // fields to link the table to the appropriate sub-tables. + upb_MiniTable* upb_MiniTable_Build(const char* data, size_t len, upb_MiniTablePlatform platform, upb_Arena* arena, upb_Status* status); + void upb_MiniTable_SetSubMessage(upb_MiniTable* table, upb_MiniTable_Field* field, const upb_MiniTable* sub); + void upb_MiniTable_SetSubEnum(upb_MiniTable* table, upb_MiniTable_Field* field, const upb_MiniTable_Enum* sub); + + bool upb_MiniTable_BuildExtension(const char* data, size_t len, upb_MiniTable_Extension* ext, upb_MiniTable_Sub sub, upb_Status* status); + + // Special-case functions for MessageSet layout and map entries. 
+ upb_MiniTable* upb_MiniTable_BuildMessageSet(upb_MiniTablePlatform platform, upb_Arena* arena); + upb_MiniTable* upb_MiniTable_BuildMapEntry(upb_FieldType key_type, upb_FieldType value_type, bool value_is_proto3_enum, upb_MiniTablePlatform platform, upb_Arena* arena); + + upb_MiniTable_Enum* upb_MiniTable_BuildEnum(const char* data, size_t len, upb_Arena* arena, upb_Status* status); + + // Like upb_MiniTable_Build(), but the user provides a buffer of layout data so + // it can be reused from call to call, avoiding repeated realloc()/free(). + // + // The caller owns `*buf` both before and after the call, and must free() it + // when it is no longer in use. The function will realloc() `*buf` as + // necessary, updating `*size` accordingly. + upb_MiniTable* upb_MiniTable_BuildWithBuf(const char* data, size_t len, upb_MiniTablePlatform platform, upb_Arena* arena, void** buf, size_t* buf_size, upb_Status* status); + + // For testing only. + char upb_ToBase92(int8_t ch); + char upb_FromBase92(uint8_t ch); + bool upb_IsTypePackable(upb_FieldType type); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_MINI_TABLE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/mini_table.hpp b/CAPI/cpp/grpc/include/upb/mini_table.hpp new file mode 100644 index 00000000..bf76477b --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/mini_table.hpp @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_MINI_TABLE_HPP_ +#define UPB_MINI_TABLE_HPP_ + +#include + +#include + +#include "upb/mini_table.h" + +namespace upb +{ + + class MtDataEncoder + { + public: + MtDataEncoder() : + appender_(&encoder_) + { + } + + bool StartMessage(uint64_t msg_mod) + { + return appender_([=](char* buf) + { return upb_MtDataEncoder_StartMessage(&encoder_, buf, msg_mod); }); + } + + bool PutField(upb_FieldType type, uint32_t field_num, uint64_t field_mod) + { + return appender_([=](char* buf) + { return upb_MtDataEncoder_PutField(&encoder_, buf, type, field_num, field_mod); }); + } + + bool StartOneof() + { + return appender_([=](char* buf) + { return upb_MtDataEncoder_StartOneof(&encoder_, buf); }); + } + + bool PutOneofField(uint32_t field_num) + { + return appender_([=](char* buf) + { return upb_MtDataEncoder_PutOneofField(&encoder_, buf, field_num); }); + } + + void StartEnum() + { + upb_MtDataEncoder_StartEnum(&encoder_); + } + + bool PutEnumValue(uint32_t enum_value) + { + return appender_([=](char* buf) + { 
return upb_MtDataEncoder_PutEnumValue(&encoder_, buf, enum_value); }); + } + + bool EndEnum() + { + return appender_( + [=](char* buf) + { return upb_MtDataEncoder_EndEnum(&encoder_, buf); } + ); + } + + const std::string& data() const + { + return appender_.data(); + } + + private: + class StringAppender + { + public: + StringAppender(upb_MtDataEncoder* e) + { + e->end = buf_ + sizeof(buf_); + } + + template + bool operator()(T&& func) + { + char* end = func(buf_); + if (!end) + return false; + // C++ does not guarantee that string has doubling growth behavior, but + // we need it to avoid O(n^2). + str_.reserve(_upb_Log2CeilingSize(str_.size() + (end - buf_))); + str_.append(buf_, end - buf_); + return true; + } + + const std::string& data() const + { + return str_; + } + + private: + char buf_[kUpb_MtDataEncoder_MinSize]; + std::string str_; + }; + + upb_MtDataEncoder encoder_; + StringAppender appender_; + }; + +} // namespace upb + +#endif /* UPB_MINI_TABLE_HPP_ */ diff --git a/CAPI/cpp/grpc/include/upb/mini_table_accessors.h b/CAPI/cpp/grpc/include/upb/mini_table_accessors.h new file mode 100644 index 00000000..1234177e --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/mini_table_accessors.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2009-2022, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_MINI_TABLE_ACCESSORS_H_ +#define UPB_MINI_TABLE_ACCESSORS_H_ + +#include "upb/array.h" +#include "upb/internal/mini_table_accessors.h" +#include "upb/msg_internal.h" + +// Must be last. +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + bool upb_MiniTable_HasField(const upb_Message* msg, const upb_MiniTable_Field* field); + + void upb_MiniTable_ClearField(upb_Message* msg, const upb_MiniTable_Field* field); + + UPB_INLINE bool upb_MiniTable_GetBool(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Bool); + return *UPB_PTR_AT(msg, field->offset, bool); + } + + UPB_INLINE void upb_MiniTable_SetBool(upb_Message* msg, const upb_MiniTable_Field* field, bool value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Bool); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, bool) = value; + } + + UPB_INLINE int32_t upb_MiniTable_GetInt32(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Int32 || field->descriptortype == kUpb_FieldType_SInt32 || field->descriptortype == kUpb_FieldType_SFixed32); + return 
*UPB_PTR_AT(msg, field->offset, int32_t); + } + + UPB_INLINE void upb_MiniTable_SetInt32(upb_Message* msg, const upb_MiniTable_Field* field, int32_t value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Int32 || field->descriptortype == kUpb_FieldType_SInt32 || field->descriptortype == kUpb_FieldType_SFixed32); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, int32_t) = value; + } + + UPB_INLINE uint32_t upb_MiniTable_GetUInt32(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_UInt32 || field->descriptortype == kUpb_FieldType_Fixed32); + return *UPB_PTR_AT(msg, field->offset, uint32_t); + } + + UPB_INLINE void upb_MiniTable_SetUInt32(upb_Message* msg, const upb_MiniTable_Field* field, uint32_t value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_UInt32 || field->descriptortype == kUpb_FieldType_Fixed32); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, uint32_t) = value; + } + + UPB_INLINE int32_t upb_MiniTable_GetEnum(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Enum); + return *UPB_PTR_AT(msg, field->offset, int32_t); + } + + UPB_INLINE void upb_MiniTable_SetEnum(upb_Message* msg, const upb_MiniTable_Field* field, int32_t value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Enum); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, int32_t) = value; + } + + UPB_INLINE int64_t upb_MiniTable_GetInt64(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Int64 || field->descriptortype == kUpb_FieldType_SInt64 || field->descriptortype == kUpb_FieldType_SFixed64); + return *UPB_PTR_AT(msg, field->offset, int64_t); + } + + UPB_INLINE void upb_MiniTable_SetInt64(upb_Message* msg, const upb_MiniTable_Field* field, int64_t value) + { + UPB_ASSERT(field->descriptortype == 
kUpb_FieldType_Int64 || field->descriptortype == kUpb_FieldType_SInt64 || field->descriptortype == kUpb_FieldType_SFixed64); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, int64_t) = value; + } + + UPB_INLINE uint64_t upb_MiniTable_GetUInt64(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_UInt64 || field->descriptortype == kUpb_FieldType_Fixed64); + return *UPB_PTR_AT(msg, field->offset, uint64_t); + } + + UPB_INLINE void upb_MiniTable_SetUInt64(upb_Message* msg, const upb_MiniTable_Field* field, uint64_t value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_UInt64 || field->descriptortype == kUpb_FieldType_Fixed64); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, uint64_t) = value; + } + + UPB_INLINE float upb_MiniTable_GetFloat(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Float); + return *UPB_PTR_AT(msg, field->offset, float); + } + + UPB_INLINE void upb_MiniTable_SetFloat(upb_Message* msg, const upb_MiniTable_Field* field, float value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Float); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, float) = value; + } + + UPB_INLINE double upb_MiniTable_GetDouble(const upb_Message* msg, const upb_MiniTable_Field* field) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Double); + return *UPB_PTR_AT(msg, field->offset, double); + } + + UPB_INLINE void upb_MiniTable_SetDouble(upb_Message* msg, const upb_MiniTable_Field* field, double value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Double); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, double) = value; + } + + UPB_INLINE upb_StringView upb_MiniTable_GetString( + const upb_Message* msg, const upb_MiniTable_Field* field + ) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Bytes || 
field->descriptortype == kUpb_FieldType_String); + return *UPB_PTR_AT(msg, field->offset, upb_StringView); + } + + UPB_INLINE void upb_MiniTable_SetString(upb_Message* msg, const upb_MiniTable_Field* field, upb_StringView value) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Bytes || field->descriptortype == kUpb_FieldType_String); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, upb_StringView) = value; + } + + UPB_INLINE const upb_Message* upb_MiniTable_GetMessage( + const upb_Message* msg, const upb_MiniTable_Field* field + ) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Message || field->descriptortype == kUpb_FieldType_Group); + return *UPB_PTR_AT(msg, field->offset, const upb_Message*); + } + + UPB_INLINE void upb_MiniTable_SetMessage(upb_Message* msg, const upb_MiniTable_Field* field, upb_Message* sub_message) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Message || field->descriptortype == kUpb_FieldType_Group); + _upb_MiniTable_SetPresence(msg, field); + *UPB_PTR_AT(msg, field->offset, const upb_Message*) = sub_message; + } + + UPB_INLINE upb_Message* upb_MiniTable_GetMutableMessage( + upb_Message* msg, const upb_MiniTable* mini_table, const upb_MiniTable_Field* field, upb_Arena* arena + ) + { + UPB_ASSERT(field->descriptortype == kUpb_FieldType_Message || field->descriptortype == kUpb_FieldType_Group); + upb_Message* sub_message = *UPB_PTR_AT(msg, field->offset, upb_Message*); + if (!sub_message) + { + sub_message = + _upb_Message_New(mini_table->subs[field->submsg_index].submsg, arena); + *UPB_PTR_AT(msg, field->offset, upb_Message*) = sub_message; + _upb_MiniTable_SetPresence(msg, field); + } + return sub_message; + } + + UPB_INLINE const upb_Array* upb_MiniTable_GetArray( + const upb_Message* msg, const upb_MiniTable_Field* field + ) + { + return (const upb_Array*)*UPB_PTR_AT(msg, field->offset, upb_Array*); + } + + UPB_INLINE upb_Array* upb_MiniTable_GetMutableArray( + upb_Message* msg, const 
upb_MiniTable_Field* field + ) + { + return (upb_Array*)*UPB_PTR_AT(msg, field->offset, upb_Array*); + } + + void* upb_MiniTable_ResizeArray(upb_Message* msg, const upb_MiniTable_Field* field, size_t len, upb_Arena* arena); + typedef enum + { + kUpb_GetExtension_Ok, + kUpb_GetExtension_NotPresent, + kUpb_GetExtension_ParseError, + kUpb_GetExtension_OutOfMemory, + } upb_GetExtension_Status; + + typedef enum + { + kUpb_GetExtensionAsBytes_Ok, + kUpb_GetExtensionAsBytes_NotPresent, + kUpb_GetExtensionAsBytes_EncodeError, + } upb_GetExtensionAsBytes_Status; + + // Returns a message extension or promotes an unknown field to + // an extension. + // + // TODO(ferhat): Only supports extension fields that are messages, + // expand support to include non-message types. + upb_GetExtension_Status upb_MiniTable_GetOrPromoteExtension( + upb_Message* msg, const upb_MiniTable_Extension* ext_table, int decode_options, upb_Arena* arena, const upb_Message_Extension** extension + ); + + // Returns a message extension or unknown field matching the extension + // data as bytes. + // + // If an extension has already been decoded it will be re-encoded + // to bytes. + upb_GetExtensionAsBytes_Status upb_MiniTable_GetExtensionAsBytes( + const upb_Message* msg, const upb_MiniTable_Extension* ext_table, int encode_options, upb_Arena* arena, const char** extension_data, size_t* len + ); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif // UPB_MINI_TABLE_ACCESSORS_H_ diff --git a/CAPI/cpp/grpc/include/upb/mini_table_accessors_internal.h b/CAPI/cpp/grpc/include/upb/mini_table_accessors_internal.h new file mode 100644 index 00000000..8368b65b --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/mini_table_accessors_internal.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2009-2022, Google LLC + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_MINI_TABLE_ACCESSORS_INTERNAL_H_ +#define UPB_MINI_TABLE_ACCESSORS_INTERNAL_H_ + +// TODO(b/232091617): Delete this entire header which currently exists only for +// temporary backwards compatibility. 
+ +#include "upb/internal/mini_table_accessors.h" + +#endif // UPB_MINI_TABLE_ACCESSORS_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/upb/msg.h b/CAPI/cpp/grpc/include/upb/msg.h new file mode 100644 index 00000000..7d04bb1a --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/msg.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Public APIs for message operations that do not require descriptors. 
+ * + * These functions can be used even in build that does not want to depend on + * reflection or descriptors. + * + * Descriptor-based reflection functionality lives in reflection.h. + */ + +#ifndef UPB_MSG_H_ +#define UPB_MSG_H_ + +#include <stddef.h> + +// TODO(b/232091617): Remove this and fix everything that breaks as a result. +#include "upb/extension_registry.h" +#include "upb/upb.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + typedef void upb_Message; + + /* For users these are opaque. They can be obtained from + * upb_MessageDef_MiniTable() but users cannot access any of the members. */ + struct upb_MiniTable; + typedef struct upb_MiniTable upb_MiniTable; + + /* Adds unknown data (serialized protobuf data) to the given message. The data + * is copied into the message instance. */ + void upb_Message_AddUnknown(upb_Message* msg, const char* data, size_t len, upb_Arena* arena); + + /* Returns a reference to the message's unknown data. */ + const char* upb_Message_GetUnknown(const upb_Message* msg, size_t* len); + + /* Returns the number of extensions present in this message. */ + size_t upb_Message_ExtensionCount(const upb_Message* msg); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_MSG_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/msg_internal.h b/CAPI/cpp/grpc/include/upb/msg_internal.h new file mode 100644 index 00000000..0aa71853 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/msg_internal.h @@ -0,0 +1,928 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* +** Our memory representation for parsing tables and messages themselves. +** Functions in this file are used by generated code and possibly reflection. +** +** The definitions in this file are internal to upb. +**/ + +#ifndef UPB_MSG_INT_H_ +#define UPB_MSG_INT_H_ + +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include "upb/extension_registry.h" +#include "upb/internal/table.h" +#include "upb/msg.h" +#include "upb/upb.h" + +/* Must be last. 
*/ +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /** upb_*Int* conversion routines ********************************************/ + + UPB_INLINE int32_t _upb_Int32_FromI(int v) + { + return (int32_t)v; + } + + UPB_INLINE int64_t _upb_Int64_FromLL(long long v) + { + return (int64_t)v; + } + + UPB_INLINE uint32_t _upb_UInt32_FromU(unsigned v) + { + return (uint32_t)v; + } + + UPB_INLINE uint64_t _upb_UInt64_FromULL(unsigned long long v) + { + return (uint64_t)v; + } + + /** upb_MiniTable *************************************************************/ + + /* upb_MiniTable represents the memory layout of a given upb_MessageDef. The + * members are public so generated code can initialize them, but users MUST NOT + * read or write any of its members. */ + + typedef struct + { + uint32_t number; + uint16_t offset; + int16_t presence; // If >0, hasbit_index. If <0, ~oneof_index + uint16_t submsg_index; // kUpb_NoSub if descriptortype != MESSAGE/GROUP/ENUM + uint8_t descriptortype; + uint8_t mode; /* upb_FieldMode | upb_LabelFlags | + (upb_FieldRep << kUpb_FieldRep_Shift) */ + } upb_MiniTable_Field; + +#define kUpb_NoSub ((uint16_t)-1) + + typedef enum + { + kUpb_FieldMode_Map = 0, + kUpb_FieldMode_Array = 1, + kUpb_FieldMode_Scalar = 2, + } upb_FieldMode; + +// Mask to isolate the upb_FieldMode from field.mode. +#define kUpb_FieldMode_Mask 3 + + /* Extra flags on the mode field. */ + typedef enum + { + kUpb_LabelFlags_IsPacked = 4, + kUpb_LabelFlags_IsExtension = 8, + } upb_LabelFlags; + + // Note: we sort by this number when calculating layout order. 
+ typedef enum + { + kUpb_FieldRep_1Byte = 0, + kUpb_FieldRep_4Byte = 1, + kUpb_FieldRep_StringView = 2, + kUpb_FieldRep_Pointer = 3, + kUpb_FieldRep_8Byte = 4, + + kUpb_FieldRep_Shift = 5, // Bit offset of the rep in upb_MiniTable_Field.mode + kUpb_FieldRep_Max = kUpb_FieldRep_8Byte, + } upb_FieldRep; + + UPB_INLINE upb_FieldMode upb_FieldMode_Get(const upb_MiniTable_Field* field) + { + return (upb_FieldMode)(field->mode & 3); + } + + UPB_INLINE bool upb_IsRepeatedOrMap(const upb_MiniTable_Field* field) + { + /* This works because upb_FieldMode has no value 3. */ + return !(field->mode & kUpb_FieldMode_Scalar); + } + + UPB_INLINE bool upb_IsSubMessage(const upb_MiniTable_Field* field) + { + return field->descriptortype == kUpb_FieldType_Message || + field->descriptortype == kUpb_FieldType_Group; + } + + struct upb_Decoder; + struct upb_MiniTable; + + typedef const char* _upb_FieldParser(struct upb_Decoder* d, const char* ptr, upb_Message* msg, intptr_t table, uint64_t hasbits, uint64_t data); + + typedef struct + { + uint64_t field_data; + _upb_FieldParser* field_parser; + } _upb_FastTable_Entry; + + typedef struct + { + const int32_t* values; // List of values <0 or >63 + uint64_t mask; // Bits are set for acceptable value 0 <= x < 64 + int value_count; + } upb_MiniTable_Enum; + + typedef union + { + const struct upb_MiniTable* submsg; + const upb_MiniTable_Enum* subenum; + } upb_MiniTable_Sub; + + typedef enum + { + kUpb_ExtMode_NonExtendable = 0, // Non-extendable message. + kUpb_ExtMode_Extendable = 1, // Normal extendable message. + kUpb_ExtMode_IsMessageSet = 2, // MessageSet message. + kUpb_ExtMode_IsMessageSet_ITEM = + 3, // MessageSet item (temporary only, see decode.c) + + // During table building we steal a bit to indicate that the message is a map + // entry. *Only* used during table building! 
+ kUpb_ExtMode_IsMapEntry = 4, + } upb_ExtMode; + + /* MessageSet wire format is: + * message MessageSet { + * repeated group Item = 1 { + * required int32 type_id = 2; + * required bytes message = 3; + * } + * } + */ + typedef enum + { + _UPB_MSGSET_ITEM = 1, + _UPB_MSGSET_TYPEID = 2, + _UPB_MSGSET_MESSAGE = 3, + } upb_msgext_fieldnum; + + struct upb_MiniTable + { + const upb_MiniTable_Sub* subs; + const upb_MiniTable_Field* fields; + /* Must be aligned to sizeof(void*). Doesn't include internal members like + * unknown fields, extension dict, pointer to msglayout, etc. */ + uint16_t size; + uint16_t field_count; + uint8_t ext; // upb_ExtMode, declared as uint8_t so sizeof(ext) == 1 + uint8_t dense_below; + uint8_t table_mask; + uint8_t required_count; // Required fields have the lowest hasbits. + /* To statically initialize the tables of variable length, we need a flexible + * array member, and we need to compile in gnu99 mode (constant initialization + * of flexible array members is a GNU extension, not in C99 unfortunately. */ + _upb_FastTable_Entry fasttable[]; + }; + + typedef struct + { + upb_MiniTable_Field field; + const upb_MiniTable* extendee; + upb_MiniTable_Sub sub; /* NULL unless submessage or proto2 enum */ + } upb_MiniTable_Extension; + + typedef struct + { + const upb_MiniTable** msgs; + const upb_MiniTable_Enum** enums; + const upb_MiniTable_Extension** exts; + int msg_count; + int enum_count; + int ext_count; + } upb_MiniTable_File; + + // Computes a bitmask in which the |l->required_count| lowest bits are set, + // except that we skip the lowest bit (because upb never uses hasbit 0). 
+ // + // Sample output: + // requiredmask(1) => 0b10 (0x2) + // requiredmask(5) => 0b111110 (0x3e) + UPB_INLINE uint64_t upb_MiniTable_requiredmask(const upb_MiniTable* l) + { + int n = l->required_count; + assert(0 < n && n <= 63); + return ((1ULL << n) - 1) << 1; + } + + /** upb_ExtensionRegistry *****************************************************/ + + /* Adds the given extension info for message type |l| and field number |num| + * into the registry. Returns false if this message type and field number were + * already in the map, or if memory allocation fails. */ + bool _upb_extreg_add(upb_ExtensionRegistry* r, const upb_MiniTable_Extension** e, size_t count); + + /* Looks up the extension (if any) defined for message type |l| and field + * number |num|. If an extension was found, copies the field info into |*ext| + * and returns true. Otherwise returns false. */ + const upb_MiniTable_Extension* _upb_extreg_get(const upb_ExtensionRegistry* r, const upb_MiniTable* l, uint32_t num); + + /** upb_Message ***************************************************************/ + + /* Internal members of a upb_Message that track unknown fields and/or + * extensions. We can change this without breaking binary compatibility. We put + * these before the user's data. The user's upb_Message* points after the + * upb_Message_Internal. */ + + typedef struct + { + /* Total size of this structure, including the data that follows. + * Must be aligned to 8, which is alignof(upb_Message_Extension) */ + uint32_t size; + + /* Offsets relative to the beginning of this structure. + * + * Unknown data grows forward from the beginning to unknown_end. + * Extension data grows backward from size to ext_begin. + * When the two meet, we're out of data and have to realloc. + * + * If we imagine that the final member of this struct is: + * char data[size - overhead]; // overhead = + * sizeof(upb_Message_InternalData) + * + * Then we have: + * unknown data: data[0 .. 
(unknown_end - overhead)] + * extensions data: data[(ext_begin - overhead) .. (size - overhead)] */ + uint32_t unknown_end; + uint32_t ext_begin; + /* Data follows, as if there were an array: + * char data[size - sizeof(upb_Message_InternalData)]; */ + } upb_Message_InternalData; + + typedef struct + { + upb_Message_InternalData* internal; + /* Message data follows. */ + } upb_Message_Internal; + + /* Maps upb_CType -> memory size. */ + extern char _upb_CTypeo_size[12]; + + UPB_INLINE size_t upb_msg_sizeof(const upb_MiniTable* l) + { + return l->size + sizeof(upb_Message_Internal); + } + + UPB_INLINE upb_Message* _upb_Message_New_inl(const upb_MiniTable* l, upb_Arena* a) + { + size_t size = upb_msg_sizeof(l); + void* mem = upb_Arena_Malloc(a, size + sizeof(upb_Message_Internal)); + upb_Message* msg; + if (UPB_UNLIKELY(!mem)) + return NULL; + msg = UPB_PTR_AT(mem, sizeof(upb_Message_Internal), upb_Message); + memset(mem, 0, size); + return msg; + } + + /* Creates a new messages with the given layout on the given arena. */ + upb_Message* _upb_Message_New(const upb_MiniTable* l, upb_Arena* a); + + UPB_INLINE upb_Message_Internal* upb_Message_Getinternal(upb_Message* msg) + { + ptrdiff_t size = sizeof(upb_Message_Internal); + return (upb_Message_Internal*)((char*)msg - size); + } + + /* Clears the given message. */ + void _upb_Message_Clear(upb_Message* msg, const upb_MiniTable* l); + + /* Discards the unknown fields for this message only. */ + void _upb_Message_DiscardUnknown_shallow(upb_Message* msg); + + /* Adds unknown data (serialized protobuf data) to the given message. The data + * is copied into the message instance. 
*/ + bool _upb_Message_AddUnknown(upb_Message* msg, const char* data, size_t len, upb_Arena* arena); + + /** upb_Message_Extension *****************************************************/ + + /* The internal representation of an extension is self-describing: it contains + * enough information that we can serialize it to binary format without needing + * to look it up in a upb_ExtensionRegistry. + * + * This representation allocates 16 bytes to data on 64-bit platforms. This is + * rather wasteful for scalars (in the extreme case of bool, it wastes 15 + * bytes). We accept this because we expect messages to be the most common + * extension type. */ + typedef struct + { + const upb_MiniTable_Extension* ext; + union + { + upb_StringView str; + void* ptr; + char scalar_data[8]; + } data; + } upb_Message_Extension; + + /* Adds the given extension data to the given message. |ext| is copied into the + * message instance. This logically replaces any previously-added extension with + * this number */ + upb_Message_Extension* _upb_Message_GetOrCreateExtension( + upb_Message* msg, const upb_MiniTable_Extension* ext, upb_Arena* arena + ); + + /* Returns an array of extensions for this message. Note: the array is + * ordered in reverse relative to the order of creation. */ + const upb_Message_Extension* _upb_Message_Getexts(const upb_Message* msg, size_t* count); + + /* Returns an extension for the given field number, or NULL if no extension + * exists for this field number. 
*/ + const upb_Message_Extension* _upb_Message_Getext( + const upb_Message* msg, const upb_MiniTable_Extension* ext + ); + + void _upb_Message_Clearext(upb_Message* msg, const upb_MiniTable_Extension* ext); + + void _upb_Message_Clearext(upb_Message* msg, const upb_MiniTable_Extension* ext); + + /** Hasbit access *************************************************************/ + + UPB_INLINE bool _upb_hasbit(const upb_Message* msg, size_t idx) + { + return (*UPB_PTR_AT(msg, idx / 8, const char) & (1 << (idx % 8))) != 0; + } + + UPB_INLINE void _upb_sethas(const upb_Message* msg, size_t idx) + { + (*UPB_PTR_AT(msg, idx / 8, char)) |= (char)(1 << (idx % 8)); + } + + UPB_INLINE void _upb_clearhas(const upb_Message* msg, size_t idx) + { + (*UPB_PTR_AT(msg, idx / 8, char)) &= (char)(~(1 << (idx % 8))); + } + + UPB_INLINE size_t _upb_Message_Hasidx(const upb_MiniTable_Field* f) + { + UPB_ASSERT(f->presence > 0); + return f->presence; + } + + UPB_INLINE bool _upb_hasbit_field(const upb_Message* msg, const upb_MiniTable_Field* f) + { + return _upb_hasbit(msg, _upb_Message_Hasidx(f)); + } + + UPB_INLINE void _upb_sethas_field(const upb_Message* msg, const upb_MiniTable_Field* f) + { + _upb_sethas(msg, _upb_Message_Hasidx(f)); + } + + UPB_INLINE void _upb_clearhas_field(const upb_Message* msg, const upb_MiniTable_Field* f) + { + _upb_clearhas(msg, _upb_Message_Hasidx(f)); + } + + /** Oneof case access *********************************************************/ + + UPB_INLINE uint32_t* _upb_oneofcase(upb_Message* msg, size_t case_ofs) + { + return UPB_PTR_AT(msg, case_ofs, uint32_t); + } + + UPB_INLINE uint32_t _upb_getoneofcase(const void* msg, size_t case_ofs) + { + return *UPB_PTR_AT(msg, case_ofs, uint32_t); + } + + UPB_INLINE size_t _upb_oneofcase_ofs(const upb_MiniTable_Field* f) + { + UPB_ASSERT(f->presence < 0); + return ~(ptrdiff_t)f->presence; + } + + UPB_INLINE uint32_t* _upb_oneofcase_field(upb_Message* msg, const upb_MiniTable_Field* f) + { + return 
_upb_oneofcase(msg, _upb_oneofcase_ofs(f)); + } + + UPB_INLINE uint32_t _upb_getoneofcase_field(const upb_Message* msg, const upb_MiniTable_Field* f) + { + return _upb_getoneofcase(msg, _upb_oneofcase_ofs(f)); + } + + UPB_INLINE bool _upb_has_submsg_nohasbit(const upb_Message* msg, size_t ofs) + { + return *UPB_PTR_AT(msg, ofs, const upb_Message*) != NULL; + } + + /** upb_Array *****************************************************************/ + + /* Our internal representation for repeated fields. */ + typedef struct + { + uintptr_t data; /* Tagged ptr: low 3 bits of ptr are lg2(elem size). */ + size_t len; /* Measured in elements. */ + size_t size; /* Measured in elements. */ + uint64_t junk; + } upb_Array; + + UPB_INLINE const void* _upb_array_constptr(const upb_Array* arr) + { + UPB_ASSERT((arr->data & 7) <= 4); + return (void*)(arr->data & ~(uintptr_t)7); + } + + UPB_INLINE uintptr_t _upb_array_tagptr(void* ptr, int elem_size_lg2) + { + UPB_ASSERT(elem_size_lg2 <= 4); + return (uintptr_t)ptr | elem_size_lg2; + } + + UPB_INLINE void* _upb_array_ptr(upb_Array* arr) + { + return (void*)_upb_array_constptr(arr); + } + + UPB_INLINE uintptr_t _upb_tag_arrptr(void* ptr, int elem_size_lg2) + { + UPB_ASSERT(elem_size_lg2 <= 4); + UPB_ASSERT(((uintptr_t)ptr & 7) == 0); + return (uintptr_t)ptr | (unsigned)elem_size_lg2; + } + + UPB_INLINE upb_Array* _upb_Array_New(upb_Arena* a, size_t init_size, int elem_size_lg2) + { + const size_t arr_size = UPB_ALIGN_UP(sizeof(upb_Array), 8); + const size_t bytes = sizeof(upb_Array) + (init_size << elem_size_lg2); + upb_Array* arr = (upb_Array*)upb_Arena_Malloc(a, bytes); + if (!arr) + return NULL; + arr->data = _upb_tag_arrptr(UPB_PTR_AT(arr, arr_size, void), elem_size_lg2); + arr->len = 0; + arr->size = init_size; + return arr; + } + + /* Resizes the capacity of the array to be at least min_size. 
*/ + bool _upb_array_realloc(upb_Array* arr, size_t min_size, upb_Arena* arena); + + /* Fallback functions for when the accessors require a resize. */ + void* _upb_Array_Resize_fallback(upb_Array** arr_ptr, size_t size, int elem_size_lg2, upb_Arena* arena); + bool _upb_Array_Append_fallback(upb_Array** arr_ptr, const void* value, int elem_size_lg2, upb_Arena* arena); + + UPB_INLINE bool _upb_array_reserve(upb_Array* arr, size_t size, upb_Arena* arena) + { + if (arr->size < size) + return _upb_array_realloc(arr, size, arena); + return true; + } + + UPB_INLINE bool _upb_Array_Resize(upb_Array* arr, size_t size, upb_Arena* arena) + { + if (!_upb_array_reserve(arr, size, arena)) + return false; + arr->len = size; + return true; + } + + UPB_INLINE void _upb_array_detach(const void* msg, size_t ofs) + { + *UPB_PTR_AT(msg, ofs, upb_Array*) = NULL; + } + + UPB_INLINE const void* _upb_array_accessor(const void* msg, size_t ofs, size_t* size) + { + const upb_Array* arr = *UPB_PTR_AT(msg, ofs, const upb_Array*); + if (arr) + { + if (size) + *size = arr->len; + return _upb_array_constptr(arr); + } + else + { + if (size) + *size = 0; + return NULL; + } + } + + UPB_INLINE void* _upb_array_mutable_accessor(void* msg, size_t ofs, size_t* size) + { + upb_Array* arr = *UPB_PTR_AT(msg, ofs, upb_Array*); + if (arr) + { + if (size) + *size = arr->len; + return _upb_array_ptr(arr); + } + else + { + if (size) + *size = 0; + return NULL; + } + } + + UPB_INLINE void* _upb_Array_Resize_accessor2(void* msg, size_t ofs, size_t size, int elem_size_lg2, upb_Arena* arena) + { + upb_Array** arr_ptr = UPB_PTR_AT(msg, ofs, upb_Array*); + upb_Array* arr = *arr_ptr; + if (!arr || arr->size < size) + { + return _upb_Array_Resize_fallback(arr_ptr, size, elem_size_lg2, arena); + } + arr->len = size; + return _upb_array_ptr(arr); + } + + UPB_INLINE bool _upb_Array_Append_accessor2(void* msg, size_t ofs, int elem_size_lg2, const void* value, upb_Arena* arena) + { + upb_Array** arr_ptr = UPB_PTR_AT(msg, 
ofs, upb_Array*); + size_t elem_size = 1 << elem_size_lg2; + upb_Array* arr = *arr_ptr; + void* ptr; + if (!arr || arr->len == arr->size) + { + return _upb_Array_Append_fallback(arr_ptr, value, elem_size_lg2, arena); + } + ptr = _upb_array_ptr(arr); + memcpy(UPB_PTR_AT(ptr, arr->len * elem_size, char), value, elem_size); + arr->len++; + return true; + } + + /* Used by old generated code, remove once all code has been regenerated. */ + UPB_INLINE int _upb_sizelg2(upb_CType type) + { + switch (type) + { + case kUpb_CType_Bool: + return 0; + case kUpb_CType_Float: + case kUpb_CType_Int32: + case kUpb_CType_UInt32: + case kUpb_CType_Enum: + return 2; + case kUpb_CType_Message: + return UPB_SIZE(2, 3); + case kUpb_CType_Double: + case kUpb_CType_Int64: + case kUpb_CType_UInt64: + return 3; + case kUpb_CType_String: + case kUpb_CType_Bytes: + return UPB_SIZE(3, 4); + } + UPB_UNREACHABLE(); + } + UPB_INLINE void* _upb_Array_Resize_accessor(void* msg, size_t ofs, size_t size, upb_CType type, upb_Arena* arena) + { + return _upb_Array_Resize_accessor2(msg, ofs, size, _upb_sizelg2(type), arena); + } + UPB_INLINE bool _upb_Array_Append_accessor(void* msg, size_t ofs, size_t elem_size, upb_CType type, const void* value, upb_Arena* arena) + { + (void)elem_size; + return _upb_Array_Append_accessor2(msg, ofs, _upb_sizelg2(type), value, arena); + } + + /** upb_Map *******************************************************************/ + + /* Right now we use strmaps for everything. We'll likely want to use + * integer-specific maps for integer-keyed maps.*/ + typedef struct + { + /* Size of key and val, based on the map type. Strings are represented as '0' + * because they must be handled specially. */ + char key_size; + char val_size; + + upb_strtable table; + } upb_Map; + + /* Map entries aren't actually stored, they are only used during parsing. For + * parsing, it helps a lot if all map entry messages have the same layout. 
+ * The compiler and def.c must ensure that all map entries have this layout. */ + typedef struct + { + upb_Message_Internal internal; + union + { + upb_StringView str; /* For str/bytes. */ + upb_value val; /* For all other types. */ + } k; + union + { + upb_StringView str; /* For str/bytes. */ + upb_value val; /* For all other types. */ + } v; + } upb_MapEntry; + + /* Creates a new map on the given arena with this key/value type. */ + upb_Map* _upb_Map_New(upb_Arena* a, size_t key_size, size_t value_size); + + /* Converting between internal table representation and user values. + * + * _upb_map_tokey() and _upb_map_fromkey() are inverses. + * _upb_map_tovalue() and _upb_map_fromvalue() are inverses. + * + * These functions account for the fact that strings are treated differently + * from other types when stored in a map. + */ + + UPB_INLINE upb_StringView _upb_map_tokey(const void* key, size_t size) + { + if (size == UPB_MAPTYPE_STRING) + { + return *(upb_StringView*)key; + } + else + { + return upb_StringView_FromDataAndSize((const char*)key, size); + } + } + + UPB_INLINE void _upb_map_fromkey(upb_StringView key, void* out, size_t size) + { + if (size == UPB_MAPTYPE_STRING) + { + memcpy(out, &key, sizeof(key)); + } + else + { + memcpy(out, key.data, size); + } + } + + UPB_INLINE bool _upb_map_tovalue(const void* val, size_t size, upb_value* msgval, upb_Arena* a) + { + if (size == UPB_MAPTYPE_STRING) + { + upb_StringView* strp = (upb_StringView*)upb_Arena_Malloc(a, sizeof(*strp)); + if (!strp) + return false; + *strp = *(upb_StringView*)val; + *msgval = upb_value_ptr(strp); + } + else + { + memcpy(msgval, val, size); + } + return true; + } + + UPB_INLINE void _upb_map_fromvalue(upb_value val, void* out, size_t size) + { + if (size == UPB_MAPTYPE_STRING) + { + const upb_StringView* strp = (const upb_StringView*)upb_value_getptr(val); + memcpy(out, strp, sizeof(upb_StringView)); + } + else + { + memcpy(out, &val, size); + } + } + + /* Map operations, shared by 
reflection and generated code. */ + + UPB_INLINE size_t _upb_Map_Size(const upb_Map* map) + { + return map->table.t.count; + } + + UPB_INLINE bool _upb_Map_Get(const upb_Map* map, const void* key, size_t key_size, void* val, size_t val_size) + { + upb_value tabval; + upb_StringView k = _upb_map_tokey(key, key_size); + bool ret = upb_strtable_lookup2(&map->table, k.data, k.size, &tabval); + if (ret && val) + { + _upb_map_fromvalue(tabval, val, val_size); + } + return ret; + } + + UPB_INLINE void* _upb_map_next(const upb_Map* map, size_t* iter) + { + upb_strtable_iter it; + it.t = &map->table; + it.index = *iter; + upb_strtable_next(&it); + *iter = it.index; + if (upb_strtable_done(&it)) + return NULL; + return (void*)str_tabent(&it); + } + + typedef enum + { + // LINT.IfChange + _kUpb_MapInsertStatus_Inserted = 0, + _kUpb_MapInsertStatus_Replaced = 1, + _kUpb_MapInsertStatus_OutOfMemory = 2, + // LINT.ThenChange(//depot/google3/third_party/upb/upb/map.h) + } _upb_MapInsertStatus; + + UPB_INLINE _upb_MapInsertStatus _upb_Map_Insert(upb_Map* map, const void* key, size_t key_size, void* val, size_t val_size, upb_Arena* a) + { + upb_StringView strkey = _upb_map_tokey(key, key_size); + upb_value tabval = {0}; + if (!_upb_map_tovalue(val, val_size, &tabval, a)) + { + return _kUpb_MapInsertStatus_OutOfMemory; + } + + /* TODO(haberman): add overwrite operation to minimize number of lookups. */ + bool removed = + upb_strtable_remove2(&map->table, strkey.data, strkey.size, NULL); + if (!upb_strtable_insert(&map->table, strkey.data, strkey.size, tabval, a)) + { + return _kUpb_MapInsertStatus_OutOfMemory; + } + return removed ? 
_kUpb_MapInsertStatus_Replaced : _kUpb_MapInsertStatus_Inserted; + } + + UPB_INLINE bool _upb_Map_Delete(upb_Map* map, const void* key, size_t key_size) + { + upb_StringView k = _upb_map_tokey(key, key_size); + return upb_strtable_remove2(&map->table, k.data, k.size, NULL); + } + + UPB_INLINE void _upb_Map_Clear(upb_Map* map) + { + upb_strtable_clear(&map->table); + } + + /* Message map operations, these get the map from the message first. */ + + UPB_INLINE size_t _upb_msg_map_size(const upb_Message* msg, size_t ofs) + { + upb_Map* map = *UPB_PTR_AT(msg, ofs, upb_Map*); + return map ? _upb_Map_Size(map) : 0; + } + + UPB_INLINE bool _upb_msg_map_get(const upb_Message* msg, size_t ofs, const void* key, size_t key_size, void* val, size_t val_size) + { + upb_Map* map = *UPB_PTR_AT(msg, ofs, upb_Map*); + if (!map) + return false; + return _upb_Map_Get(map, key, key_size, val, val_size); + } + + UPB_INLINE void* _upb_msg_map_next(const upb_Message* msg, size_t ofs, size_t* iter) + { + upb_Map* map = *UPB_PTR_AT(msg, ofs, upb_Map*); + if (!map) + return NULL; + return _upb_map_next(map, iter); + } + + UPB_INLINE bool _upb_msg_map_set(upb_Message* msg, size_t ofs, const void* key, size_t key_size, void* val, size_t val_size, upb_Arena* arena) + { + upb_Map** map = UPB_PTR_AT(msg, ofs, upb_Map*); + if (!*map) + { + *map = _upb_Map_New(arena, key_size, val_size); + } + return _upb_Map_Insert(*map, key, key_size, val, val_size, arena) != + _kUpb_MapInsertStatus_OutOfMemory; + } + + UPB_INLINE bool _upb_msg_map_delete(upb_Message* msg, size_t ofs, const void* key, size_t key_size) + { + upb_Map* map = *UPB_PTR_AT(msg, ofs, upb_Map*); + if (!map) + return false; + return _upb_Map_Delete(map, key, key_size); + } + + UPB_INLINE void _upb_msg_map_clear(upb_Message* msg, size_t ofs) + { + upb_Map* map = *UPB_PTR_AT(msg, ofs, upb_Map*); + if (!map) + return; + _upb_Map_Clear(map); + } + + /* Accessing map key/value from a pointer, used by generated code only. 
*/ + + UPB_INLINE void _upb_msg_map_key(const void* msg, void* key, size_t size) + { + const upb_tabent* ent = (const upb_tabent*)msg; + uint32_t u32len; + upb_StringView k; + k.data = upb_tabstr(ent->key, &u32len); + k.size = u32len; + _upb_map_fromkey(k, key, size); + } + + UPB_INLINE void _upb_msg_map_value(const void* msg, void* val, size_t size) + { + const upb_tabent* ent = (const upb_tabent*)msg; + upb_value v = {ent->val.val}; + _upb_map_fromvalue(v, val, size); + } + + UPB_INLINE void _upb_msg_map_set_value(void* msg, const void* val, size_t size) + { + upb_tabent* ent = (upb_tabent*)msg; + /* This is like _upb_map_tovalue() except the entry already exists so we can + * reuse the allocated upb_StringView for string fields. */ + if (size == UPB_MAPTYPE_STRING) + { + upb_StringView* strp = (upb_StringView*)(uintptr_t)ent->val.val; + memcpy(strp, val, sizeof(*strp)); + } + else + { + memcpy(&ent->val.val, val, size); + } + } + + /** _upb_mapsorter ************************************************************/ + + /* _upb_mapsorter sorts maps and provides ordered iteration over the entries. + * Since maps can be recursive (map values can be messages which contain other + * maps). _upb_mapsorter can contain a stack of maps. 
*/ + + typedef struct + { + upb_tabent const** entries; + int size; + int cap; + } _upb_mapsorter; + + typedef struct + { + int start; + int pos; + int end; + } _upb_sortedmap; + + UPB_INLINE void _upb_mapsorter_init(_upb_mapsorter* s) + { + s->entries = NULL; + s->size = 0; + s->cap = 0; + } + + UPB_INLINE void _upb_mapsorter_destroy(_upb_mapsorter* s) + { + if (s->entries) + free(s->entries); + } + + bool _upb_mapsorter_pushmap(_upb_mapsorter* s, upb_FieldType key_type, const upb_Map* map, _upb_sortedmap* sorted); + + UPB_INLINE void _upb_mapsorter_popmap(_upb_mapsorter* s, _upb_sortedmap* sorted) + { + s->size = sorted->start; + } + + UPB_INLINE bool _upb_sortedmap_next(_upb_mapsorter* s, const upb_Map* map, _upb_sortedmap* sorted, upb_MapEntry* ent) + { + if (sorted->pos == sorted->end) + return false; + const upb_tabent* tabent = s->entries[sorted->pos++]; + upb_StringView key = upb_tabstrview(tabent->key); + _upb_map_fromkey(key, &ent->k, map->key_size); + upb_value val = {tabent->val.val}; + _upb_map_fromvalue(val, &ent->v, map->val_size); + return true; + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_MSG_INT_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/port_def.inc b/CAPI/cpp/grpc/include/upb/port_def.inc new file mode 100644 index 00000000..92e4bf24 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/port_def.inc @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This is where we define macros used across upb. + * + * All of these macros are undef'd in port_undef.inc to avoid leaking them to + * users. + * + * The correct usage is: + * + * #include "upb/foobar.h" + * #include "upb/baz.h" + * + * // MUST be last included header. + * #include "upb/port_def.inc" + * + * // Code for this file. + * // <...> + * + * // Can be omitted for .c files, required for .h. + * #include "upb/port_undef.inc" + * + * This file is private and must not be included by users! + */ + +#if !((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) || \ + (defined(_MSC_VER) && _MSC_VER >= 1900)) +#error upb requires C99 or C++11 or MSVC >= 2015. +#endif + +#include <stdint.h> +#include <stddef.h> + +#if UINTPTR_MAX == 0xffffffff +#define UPB_SIZE(size32, size64) size32 +#else +#define UPB_SIZE(size32, size64) size64 +#endif + +/* If we always read/write as a consistent type to each address, this shouldn't + * violate aliasing. 
+ */ +#define UPB_PTR_AT(msg, ofs, type) ((type*)((char*)(msg) + (ofs))) + +#define UPB_READ_ONEOF(msg, fieldtype, offset, case_offset, case_val, default) \ + *UPB_PTR_AT(msg, case_offset, int) == case_val \ + ? *UPB_PTR_AT(msg, offset, fieldtype) \ + : default + +#define UPB_WRITE_ONEOF(msg, fieldtype, offset, value, case_offset, case_val) \ + *UPB_PTR_AT(msg, case_offset, int) = case_val; \ + *UPB_PTR_AT(msg, offset, fieldtype) = value; + +#define UPB_MAPTYPE_STRING 0 + +/* UPB_INLINE: inline if possible, emit standalone code if required. */ +#ifdef __cplusplus +#define UPB_INLINE inline +#elif defined (__GNUC__) || defined(__clang__) +#define UPB_INLINE static __inline__ +#else +#define UPB_INLINE static +#endif + +#define UPB_MALLOC_ALIGN 8 +#define UPB_ALIGN_UP(size, align) (((size) + (align) - 1) / (align) * (align)) +#define UPB_ALIGN_DOWN(size, align) ((size) / (align) * (align)) +#define UPB_ALIGN_MALLOC(size) UPB_ALIGN_UP(size, UPB_MALLOC_ALIGN) +#define UPB_ALIGN_OF(type) offsetof (struct { char c; type member; }, member) + +/* Hints to the compiler about likely/unlikely branches. */ +#if defined (__GNUC__) || defined(__clang__) +#define UPB_LIKELY(x) __builtin_expect((x),1) +#define UPB_UNLIKELY(x) __builtin_expect((x),0) +#else +#define UPB_LIKELY(x) (x) +#define UPB_UNLIKELY(x) (x) +#endif + +/* Macros for function attributes on compilers that support them. 
*/ +#ifdef __GNUC__ +#define UPB_FORCEINLINE __inline__ __attribute__((always_inline)) +#define UPB_NOINLINE __attribute__((noinline)) +#define UPB_NORETURN __attribute__((__noreturn__)) +#define UPB_PRINTF(str, first_vararg) __attribute__((format (printf, str, first_vararg))) +#elif defined(_MSC_VER) +#define UPB_NOINLINE +#define UPB_FORCEINLINE +#define UPB_NORETURN __declspec(noreturn) +#define UPB_PRINTF(str, first_vararg) +#else /* !defined(__GNUC__) */ +#define UPB_FORCEINLINE +#define UPB_NOINLINE +#define UPB_NORETURN +#define UPB_PRINTF(str, first_vararg) +#endif + +#define UPB_MAX(x, y) ((x) > (y) ? (x) : (y)) +#define UPB_MIN(x, y) ((x) < (y) ? (x) : (y)) + +#define UPB_UNUSED(var) (void)var + +/* UPB_ASSUME(): in release mode, we tell the compiler to assume this is true. + */ +#ifdef NDEBUG +#ifdef __GNUC__ +#define UPB_ASSUME(expr) if (!(expr)) __builtin_unreachable() +#elif defined _MSC_VER +#define UPB_ASSUME(expr) if (!(expr)) __assume(0) +#else +#define UPB_ASSUME(expr) do {} while (false && (expr)) +#endif +#else +#define UPB_ASSUME(expr) assert(expr) +#endif + +/* UPB_ASSERT(): in release mode, we use the expression without letting it be + * evaluated. This prevents "unused variable" warnings. */ +#ifdef NDEBUG +#define UPB_ASSERT(expr) do {} while (false && (expr)) +#else +#define UPB_ASSERT(expr) assert(expr) +#endif + +#if defined(__GNUC__) || defined(__clang__) +#define UPB_UNREACHABLE() do { assert(0); __builtin_unreachable(); } while(0) +#else +#define UPB_UNREACHABLE() do { assert(0); } while(0) +#endif + +/* UPB_SETJMP() / UPB_LONGJMP(): avoid setting/restoring signal mask. */ +#ifdef __APPLE__ +#define UPB_SETJMP(buf) _setjmp(buf) +#define UPB_LONGJMP(buf, val) _longjmp(buf, val) +#else +#define UPB_SETJMP(buf) setjmp(buf) +#define UPB_LONGJMP(buf, val) longjmp(buf, val) +#endif + +/* UPB_PTRADD(ptr, ofs): add pointer while avoiding "NULL + 0" UB */ +#define UPB_PTRADD(ptr, ofs) ((ofs) ? 
(ptr) + (ofs) : (ptr)) + +/* Configure whether fasttable is switched on or not. *************************/ + +#ifdef __has_attribute +#define UPB_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +#define UPB_HAS_ATTRIBUTE(x) 0 +#endif + +#if UPB_HAS_ATTRIBUTE(musttail) +#define UPB_MUSTTAIL __attribute__((musttail)) +#else +#define UPB_MUSTTAIL +#endif + +#undef UPB_HAS_ATTRIBUTE + +/* This check is not fully robust: it does not require that we have "musttail" + * support available. We need tail calls to avoid consuming arbitrary amounts + * of stack space. + * + * GCC/Clang can mostly be trusted to generate tail calls as long as + * optimization is enabled, but, debug builds will not generate tail calls + * unless "musttail" is available. + * + * We should probably either: + * 1. require that the compiler supports musttail. + * 2. add some fallback code for when musttail isn't available (ie. return + * instead of tail calling). This is safe and portable, but this comes at + * a CPU cost. + */ +#if (defined(__x86_64__) || defined(__aarch64__)) && defined(__GNUC__) +#define UPB_FASTTABLE_SUPPORTED 1 +#else +#define UPB_FASTTABLE_SUPPORTED 0 +#endif + +/* define UPB_ENABLE_FASTTABLE to force fast table support. + * This is useful when we want to ensure we are really getting fasttable, + * for example for testing or benchmarking. */ +#if defined(UPB_ENABLE_FASTTABLE) +#if !UPB_FASTTABLE_SUPPORTED +#error fasttable is x86-64/ARM64 only and requires GCC or Clang. +#endif +#define UPB_FASTTABLE 1 +/* Define UPB_TRY_ENABLE_FASTTABLE to use fasttable if possible. + * This is useful for releasing code that might be used on multiple platforms, + * for example the PHP or Ruby C extensions. */ +#elif defined(UPB_TRY_ENABLE_FASTTABLE) +#define UPB_FASTTABLE UPB_FASTTABLE_SUPPORTED +#else +#define UPB_FASTTABLE 0 +#endif + +/* UPB_FASTTABLE_INIT() allows protos compiled for fasttable to gracefully + * degrade to non-fasttable if we are using UPB_TRY_ENABLE_FASTTABLE. 
*/ +#if !UPB_FASTTABLE && defined(UPB_TRY_ENABLE_FASTTABLE) +#define UPB_FASTTABLE_INIT(...) +#else +#define UPB_FASTTABLE_INIT(...) __VA_ARGS__ +#endif + +#undef UPB_FASTTABLE_SUPPORTED + +/* ASAN poisoning (for arena) *************************************************/ + +#if defined(__SANITIZE_ADDRESS__) +#define UPB_ASAN 1 +#ifdef __cplusplus +extern "C" { +#endif +void __asan_poison_memory_region(void const volatile *addr, size_t size); +void __asan_unpoison_memory_region(void const volatile *addr, size_t size); +#ifdef __cplusplus +} /* extern "C" */ +#endif +#define UPB_POISON_MEMORY_REGION(addr, size) \ + __asan_poison_memory_region((addr), (size)) +#define UPB_UNPOISON_MEMORY_REGION(addr, size) \ + __asan_unpoison_memory_region((addr), (size)) +#else +#define UPB_ASAN 0 +#define UPB_POISON_MEMORY_REGION(addr, size) \ + ((void)(addr), (void)(size)) +#define UPB_UNPOISON_MEMORY_REGION(addr, size) \ + ((void)(addr), (void)(size)) +#endif + +/* Disable proto2 arena behavior (TEMPORARY) **********************************/ + +#ifdef UPB_DISABLE_PROTO2_ENUM_CHECKING +#define UPB_TREAT_PROTO2_ENUMS_LIKE_PROTO3 1 +#else +#define UPB_TREAT_PROTO2_ENUMS_LIKE_PROTO3 0 +#endif diff --git a/CAPI/cpp/grpc/include/upb/port_undef.inc b/CAPI/cpp/grpc/include/upb/port_undef.inc new file mode 100644 index 00000000..1dff3d27 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/port_undef.inc @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* See port_def.inc. This should #undef all macros #defined there. 
*/ + +#undef UPB_SIZE +#undef UPB_PTR_AT +#undef UPB_READ_ONEOF +#undef UPB_WRITE_ONEOF +#undef UPB_MAPTYPE_STRING +#undef UPB_INLINE +#undef UPB_ALIGN_UP +#undef UPB_ALIGN_DOWN +#undef UPB_ALIGN_MALLOC +#undef UPB_ALIGN_OF +#undef UPB_MALLOC_ALIGN +#undef UPB_LIKELY +#undef UPB_UNLIKELY +#undef UPB_FORCEINLINE +#undef UPB_NOINLINE +#undef UPB_NORETURN +#undef UPB_PRINTF +#undef UPB_MAX +#undef UPB_MIN +#undef UPB_UNUSED +#undef UPB_ASSUME +#undef UPB_ASSERT +#undef UPB_UNREACHABLE +#undef UPB_SETJMP +#undef UPB_LONGJMP +#undef UPB_PTRADD +#undef UPB_MUSTTAIL +#undef UPB_FASTTABLE_SUPPORTED +#undef UPB_FASTTABLE +#undef UPB_FASTTABLE_INIT +#undef UPB_POISON_MEMORY_REGION +#undef UPB_UNPOISON_MEMORY_REGION +#undef UPB_ASAN +#undef UPB_TREAT_PROTO2_ENUMS_LIKE_PROTO3 diff --git a/CAPI/cpp/grpc/include/upb/reflection.h b/CAPI/cpp/grpc/include/upb/reflection.h new file mode 100644 index 00000000..36a54e87 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/reflection.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_REFLECTION_H_ +#define UPB_REFLECTION_H_ + +#include "upb/array.h" +#include "upb/def.h" +#include "upb/map.h" +#include "upb/msg.h" +#include "upb/port_def.inc" +#include "upb/upb.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + upb_MessageValue upb_FieldDef_Default(const upb_FieldDef* f); + + /** upb_Message + * *******************************************************************/ + + /* Creates a new message of the given type in the given arena. */ + upb_Message* upb_Message_New(const upb_MessageDef* m, upb_Arena* a); + + /* Returns the value associated with this field. */ + upb_MessageValue upb_Message_Get(const upb_Message* msg, const upb_FieldDef* f); + + /* Returns a mutable pointer to a map, array, or submessage value. If the given + * arena is non-NULL this will construct a new object if it was not previously + * present. May not be called for primitive fields. */ + upb_MutableMessageValue upb_Message_Mutable(upb_Message* msg, const upb_FieldDef* f, upb_Arena* a); + + /* May only be called for fields where upb_FieldDef_HasPresence(f) == true. 
*/ + bool upb_Message_Has(const upb_Message* msg, const upb_FieldDef* f); + + /* Returns the field that is set in the oneof, or NULL if none are set. */ + const upb_FieldDef* upb_Message_WhichOneof(const upb_Message* msg, const upb_OneofDef* o); + + /* Sets the given field to the given value. For a msg/array/map/string, the + * caller must ensure that the target data outlives |msg| (by living either in + * the same arena or a different arena that outlives it). + * + * Returns false if allocation fails. */ + bool upb_Message_Set(upb_Message* msg, const upb_FieldDef* f, upb_MessageValue val, upb_Arena* a); + + /* Clears any field presence and sets the value back to its default. */ + void upb_Message_ClearField(upb_Message* msg, const upb_FieldDef* f); + + /* Clear all data and unknown fields. */ + void upb_Message_Clear(upb_Message* msg, const upb_MessageDef* m); + + /* Iterate over present fields. + * + * size_t iter = kUpb_Message_Begin; + * const upb_FieldDef *f; + * upb_MessageValue val; + * while (upb_Message_Next(msg, m, ext_pool, &f, &val, &iter)) { + * process_field(f, val); + * } + * + * If ext_pool is NULL, no extensions will be returned. If the given symtab + * returns extensions that don't match what is in this message, those extensions + * will be skipped. + */ + +#define kUpb_Message_Begin -1 + bool upb_Message_Next(const upb_Message* msg, const upb_MessageDef* m, const upb_DefPool* ext_pool, const upb_FieldDef** f, upb_MessageValue* val, size_t* iter); + + /* Clears all unknown field data from this message and all submessages. 
*/ + bool upb_Message_DiscardUnknown(upb_Message* msg, const upb_MessageDef* m, int maxdepth); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_REFLECTION_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/reflection.hpp b/CAPI/cpp/grpc/include/upb/reflection.hpp new file mode 100644 index 00000000..57a28f1c --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/reflection.hpp @@ -0,0 +1,38 @@ +// Copyright (c) 2009-2021, Google LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Google LLC nor the +// names of its contributors may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef UPB_REFLECTION_HPP_ +#define UPB_REFLECTION_HPP_ + +#include "upb/reflection.h" + +namespace upb +{ + + typedef upb_MessageValue MessageValue; + +} // namespace upb + +#endif // UPB_REFLECTION_HPP_ diff --git a/CAPI/cpp/grpc/include/upb/status.h b/CAPI/cpp/grpc/include/upb/status.h new file mode 100644 index 00000000..dbdd50c7 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/status.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef UPB_STATUS_H_ +#define UPB_STATUS_H_ + +#include <stdarg.h> +#include <stdbool.h> + +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define _kUpb_Status_MaxMessage 127 + + typedef struct + { + bool ok; + char msg[_kUpb_Status_MaxMessage]; /* Error message; NULL-terminated. */ + } upb_Status; + + const char* upb_Status_ErrorMessage(const upb_Status* status); + bool upb_Status_IsOk(const upb_Status* status); + + /* These are no-op if |status| is NULL. */ + void upb_Status_Clear(upb_Status* status); + void upb_Status_SetErrorMessage(upb_Status* status, const char* msg); + void upb_Status_SetErrorFormat(upb_Status* status, const char* fmt, ...) + UPB_PRINTF(2, 3); + void upb_Status_VSetErrorFormat(upb_Status* status, const char* fmt, va_list args) UPB_PRINTF(2, 0); + void upb_Status_VAppendErrorFormat(upb_Status* status, const char* fmt, va_list args) UPB_PRINTF(2, 0); + +#include "upb/port_undef.inc" + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_STATUS_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/table_internal.h b/CAPI/cpp/grpc/include/upb/table_internal.h new file mode 100644 index 00000000..7ac8bf78 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/table_internal.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_TABLE_H_ +#define UPB_TABLE_H_ + +// TODO(b/232091617): Delete this entire header which currently exists only for +// temporary backwards compatibility. + +#include "upb/internal/table.h" + +#endif /* UPB_TABLE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/text_encode.h b/CAPI/cpp/grpc/include/upb/text_encode.h new file mode 100644 index 00000000..b0e0fdb5 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/text_encode.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_TEXTENCODE_H_ +#define UPB_TEXTENCODE_H_ + +#include "upb/def.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + enum + { + /* When set, prints everything on a single line. */ + UPB_TXTENC_SINGLELINE = 1, + + /* When set, unknown fields are not printed. */ + UPB_TXTENC_SKIPUNKNOWN = 2, + + /* When set, maps are *not* sorted (this avoids allocating tmp mem). */ + UPB_TXTENC_NOSORT = 4 + }; + + /* Encodes the given |msg| to text format. The message's reflection is given in + * |m|. The symtab in |symtab| is used to find extensions (if NULL, extensions + * will not be printed). + * + * Output is placed in the given buffer, and always NULL-terminated. The output + * size (excluding NULL) is returned. This means that a return value >= |size| + * implies that the output was truncated. (These are the same semantics as + * snprintf()). 
*/ + size_t upb_TextEncode(const upb_Message* msg, const upb_MessageDef* m, const upb_DefPool* ext_pool, int options, char* buf, size_t size); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_TEXTENCODE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/upb.h b/CAPI/cpp/grpc/include/upb/upb.h new file mode 100644 index 00000000..90228e14 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/upb.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* + * This file contains shared definitions that are widely used across upb. + */ + +#ifndef UPB_H_ +#define UPB_H_ + +#include <assert.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> +#include <string.h> + +// TODO(b/232091617): Remove these and fix everything that breaks as a result. +#include "upb/arena.h" +#include "upb/status.h" + +// Must be last. +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + /** upb_StringView ************************************************************/ + + typedef struct + { + const char* data; + size_t size; + } upb_StringView; + + UPB_INLINE upb_StringView upb_StringView_FromDataAndSize(const char* data, size_t size) + { + upb_StringView ret; + ret.data = data; + ret.size = size; + return ret; + } + + UPB_INLINE upb_StringView upb_StringView_FromString(const char* data) + { + return upb_StringView_FromDataAndSize(data, strlen(data)); + } + + UPB_INLINE bool upb_StringView_IsEqual(upb_StringView a, upb_StringView b) + { + return a.size == b.size && memcmp(a.data, b.data, a.size) == 0; + } + +#define UPB_STRINGVIEW_INIT(ptr, len) \ + { \ + ptr, len \ + } + +#define UPB_STRINGVIEW_FORMAT "%.*s" +#define UPB_STRINGVIEW_ARGS(view) (int)(view).size, (view).data + + /* Constants ******************************************************************/ + + /* A list of types as they are encoded on-the-wire. */ + typedef enum + { + kUpb_WireType_Varint = 0, + kUpb_WireType_64Bit = 1, + kUpb_WireType_Delimited = 2, + kUpb_WireType_StartGroup = 3, + kUpb_WireType_EndGroup = 4, + kUpb_WireType_32Bit = 5 + } upb_WireType; + + /* The types a field can have. Note that this list is not identical to the + * types defined in descriptor.proto, which gives INT32 and SINT32 separate + * types (we distinguish the two with the "integer encoding" enum below). */ + typedef enum + { + kUpb_CType_Bool = 1, + kUpb_CType_Float = 2, + kUpb_CType_Int32 = 3, + kUpb_CType_UInt32 = 4, + kUpb_CType_Enum = 5, /* Enum values are int32. 
*/ + kUpb_CType_Message = 6, + kUpb_CType_Double = 7, + kUpb_CType_Int64 = 8, + kUpb_CType_UInt64 = 9, + kUpb_CType_String = 10, + kUpb_CType_Bytes = 11 + } upb_CType; + + /* The repeated-ness of each field; this matches descriptor.proto. */ + typedef enum + { + kUpb_Label_Optional = 1, + kUpb_Label_Required = 2, + kUpb_Label_Repeated = 3 + } upb_Label; + + /* Descriptor types, as defined in descriptor.proto. */ + typedef enum + { + kUpb_FieldType_Double = 1, + kUpb_FieldType_Float = 2, + kUpb_FieldType_Int64 = 3, + kUpb_FieldType_UInt64 = 4, + kUpb_FieldType_Int32 = 5, + kUpb_FieldType_Fixed64 = 6, + kUpb_FieldType_Fixed32 = 7, + kUpb_FieldType_Bool = 8, + kUpb_FieldType_String = 9, + kUpb_FieldType_Group = 10, + kUpb_FieldType_Message = 11, + kUpb_FieldType_Bytes = 12, + kUpb_FieldType_UInt32 = 13, + kUpb_FieldType_Enum = 14, + kUpb_FieldType_SFixed32 = 15, + kUpb_FieldType_SFixed64 = 16, + kUpb_FieldType_SInt32 = 17, + kUpb_FieldType_SInt64 = 18 + } upb_FieldType; + +#define kUpb_Map_Begin ((size_t)-1) + + UPB_INLINE bool _upb_IsLittleEndian(void) + { + int x = 1; + return *(char*)&x == 1; + } + + UPB_INLINE uint32_t _upb_BigEndian_Swap32(uint32_t val) + { + if (_upb_IsLittleEndian()) + { + return val; + } + else + { + return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | + ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); + } + } + + UPB_INLINE uint64_t _upb_BigEndian_Swap64(uint64_t val) + { + if (_upb_IsLittleEndian()) + { + return val; + } + else + { + return ((uint64_t)_upb_BigEndian_Swap32((uint32_t)val) << 32) | + _upb_BigEndian_Swap32((uint32_t)(val >> 32)); + } + } + + UPB_INLINE int _upb_Log2Ceiling(int x) + { + if (x <= 1) + return 0; +#ifdef __GNUC__ + return 32 - __builtin_clz(x - 1); +#else + int lg2 = 0; + while (1 << lg2 < x) + lg2++; + return lg2; +#endif + } + + UPB_INLINE int _upb_Log2CeilingSize(int x) + { + return 1 << _upb_Log2Ceiling(x); + } + +#include "upb/port_undef.inc" + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* 
UPB_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/upb.hpp b/CAPI/cpp/grpc/include/upb/upb.hpp new file mode 100644 index 00000000..587194e1 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/upb.hpp @@ -0,0 +1,151 @@ +// Copyright (c) 2009-2021, Google LLC +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Google LLC nor the +// names of its contributors may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef UPB_HPP_ +#define UPB_HPP_ + +#include + +#include "upb/upb.h" + +namespace upb +{ + + class Status + { + public: + Status() + { + upb_Status_Clear(&status_); + } + + upb_Status* ptr() + { + return &status_; + } + + // Returns true if there is no error. + bool ok() const + { + return upb_Status_IsOk(&status_); + } + + // Guaranteed to be NULL-terminated. + const char* error_message() const + { + return upb_Status_ErrorMessage(&status_); + } + + // The error message will be truncated if it is longer than + // _kUpb_Status_MaxMessage-4. + void SetErrorMessage(const char* msg) + { + upb_Status_SetErrorMessage(&status_, msg); + } + void SetFormattedErrorMessage(const char* fmt, ...) + { + va_list args; + va_start(args, fmt); + upb_Status_VSetErrorFormat(&status_, fmt, args); + va_end(args); + } + + // Resets the status to a successful state with no message. + void Clear() + { + upb_Status_Clear(&status_); + } + + private: + upb_Status status_; + }; + + class Arena + { + public: + // A simple arena with no initial memory block and the default allocator. + Arena() : + ptr_(upb_Arena_New(), upb_Arena_Free) + { + } + Arena(char* initial_block, size_t size) : + ptr_(upb_Arena_Init(initial_block, size, &upb_alloc_global), upb_Arena_Free) + { + } + + upb_Arena* ptr() + { + return ptr_.get(); + } + + // Allows this arena to be used as a generic allocator. + // + // The arena does not need free() calls so when using Arena as an allocator + // it is safe to skip them. However they are no-ops so there is no harm in + // calling free() either. + upb_alloc* allocator() + { + return upb_Arena_Alloc(ptr_.get()); + } + + // Add a cleanup function to run when the arena is destroyed. + // Returns false on out-of-memory. 
+ template + bool Own(T* obj) + { + return upb_Arena_AddCleanup(ptr_.get(), obj, [](void* obj) + { delete static_cast(obj); }); + } + + void Fuse(Arena& other) + { + upb_Arena_Fuse(ptr(), other.ptr()); + } + + private: + std::unique_ptr ptr_; + }; + + // InlinedArena seeds the arenas with a predefined amount of memory. No + // heap memory will be allocated until the initial block is exceeded. + template + class InlinedArena : public Arena + { + public: + InlinedArena() : + Arena(initial_block_, N) + { + } + + private: + InlinedArena(const InlinedArena*) = delete; + InlinedArena& operator=(const InlinedArena*) = delete; + + char initial_block_[N]; + }; + +} // namespace upb + +#endif // UPB_HPP_ diff --git a/CAPI/cpp/grpc/include/upb/upb_internal.h b/CAPI/cpp/grpc/include/upb/upb_internal.h new file mode 100644 index 00000000..6c1fec33 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/upb_internal.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_INT_H_ +#define UPB_INT_H_ + +// TODO(b/232091617): Delete this entire header which currently exists only for +// temporary backwards compatibility. + +#include "upb/internal/upb.h" + +#endif /* UPB_INT_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/util/compare.h b/CAPI/cpp/grpc/include/upb/util/compare.h new file mode 100644 index 00000000..d8ec1563 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/util/compare.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_UTIL_COMPARE_H_ +#define UPB_UTIL_COMPARE_H_ + +#include "upb/def.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + // Returns true if unknown fields from the two messages are equal when sorted + // and varints are made canonical. + // + // This function is discouraged, as the comparison is inherently lossy without + // schema data: + // + // 1. We don't know whether delimited fields are sub-messages. Unknown + // sub-messages will therefore not have their fields sorted and varints + // canonicalized. + // 2. We don't know about oneof/non-repeated fields, which should semantically + // discard every value except the last. + + typedef enum + { + kUpb_UnknownCompareResult_Equal = 0, + kUpb_UnknownCompareResult_NotEqual = 1, + kUpb_UnknownCompareResult_OutOfMemory = 2, + kUpb_UnknownCompareResult_MaxDepthExceeded = 3, + } upb_UnknownCompareResult; + + upb_UnknownCompareResult upb_Message_UnknownFieldsAreEqual(const char* buf1, size_t size1, const char* buf2, size_t size2, int max_depth); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_UTIL_COMPARE_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/util/def_to_proto.h b/CAPI/cpp/grpc/include/upb/util/def_to_proto.h new file mode 100644 index 00000000..15cd4e55 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/util/def_to_proto.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_UTIL_DEF_TO_PROTO_H_ +#define UPB_UTIL_DEF_TO_PROTO_H_ + +#include "upb/def.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + // Functions for converting defs back to the equivalent descriptor proto. + // Ultimately the goal is that a round-trip proto->def->proto is lossless. Each + // function returns a new proto created in arena `a`, or NULL if memory + // allocation failed. 
+ google_protobuf_DescriptorProto* upb_MessageDef_ToProto(const upb_MessageDef* m, upb_Arena* a); + google_protobuf_EnumDescriptorProto* upb_EnumDef_ToProto(const upb_EnumDef* e, upb_Arena* a); + google_protobuf_EnumValueDescriptorProto* upb_EnumValueDef_ToProto( + const upb_EnumValueDef* e, upb_Arena* a + ); + google_protobuf_FieldDescriptorProto* upb_FieldDef_ToProto( + const upb_FieldDef* f, upb_Arena* a + ); + google_protobuf_OneofDescriptorProto* upb_OneofDef_ToProto( + const upb_OneofDef* o, upb_Arena* a + ); + google_protobuf_FileDescriptorProto* upb_FileDef_ToProto(const upb_FileDef* f, upb_Arena* a); + google_protobuf_MethodDescriptorProto* upb_MethodDef_ToProto( + const upb_MethodDef* m, upb_Arena* a + ); + google_protobuf_ServiceDescriptorProto* upb_ServiceDef_ToProto( + const upb_ServiceDef* s, upb_Arena* a + ); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* UPB_UTIL_DEF_TO_PROTO_H_ */ diff --git a/CAPI/cpp/grpc/include/upb/util/required_fields.h b/CAPI/cpp/grpc/include/upb/util/required_fields.h new file mode 100644 index 00000000..2d697420 --- /dev/null +++ b/CAPI/cpp/grpc/include/upb/util/required_fields.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2009-2021, Google LLC + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Google LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UPB_UTIL_REQUIRED_FIELDS_H_ +#define UPB_UTIL_REQUIRED_FIELDS_H_ + +#include "upb/def.h" +#include "upb/reflection.h" + +/* Must be last. */ +#include "upb/port_def.inc" + +#ifdef __cplusplus +extern "C" +{ +#endif + + // A FieldPath can be encoded as an array of upb_FieldPathEntry, in the + // following format: + // { {.field = f1}, {.field = f2} } # f1.f2 + // { {.field = f1}, {.index = 5}, {.field = f2} } # f1[5].f2 + // { {.field = f1}, {.key = "abc"}, {.field = f2} } # f1["abc"].f2 + // + // Users must look at the type of `field` to know if an index or map key + // follows. + // + // A field path may be NULL-terminated, in which case a NULL field indicates + // the end of the field path. + typedef union + { + const upb_FieldDef* field; + size_t array_index; + upb_MessageValue map_key; + } upb_FieldPathEntry; + + // Writes a string representing `*path` to `buf` in the following textual + // format: + // foo.bar # Regular fields + // repeated_baz[2].bar # Repeated field + // int32_msg_map[5].bar # Integer-keyed map + // string_msg_map["abc"] # String-keyed map + // bool_msg_map[true] # Bool-keyed map + // + // The input array `*path` must be NULL-terminated. 
The pointer `*path` will be + // updated to point to one past the terminating NULL pointer of the input array. + // + // The output buffer `buf` will always be NULL-terminated. If the output data + // (including NULL terminator) exceeds `size`, the result will be truncated. + // Returns the string length of the data we attempted to write, excluding the + // terminating NULL. + size_t upb_FieldPath_ToText(upb_FieldPathEntry** path, char* buf, size_t size); + + // Checks whether `msg` or any of its children has unset required fields, + // returning `true` if any are found. `msg` may be NULL, in which case the + // message will be treated as empty. + // + // When this function returns true, `fields` is updated (if non-NULL) to point + // to a heap-allocated array encoding the field paths of the required fields + // that are missing. Each path is terminated with {.field = NULL}, and a final + // {.field = NULL} terminates the list of paths. The caller is responsible for + // freeing this array. + bool upb_util_HasUnsetRequired(const upb_Message* msg, const upb_MessageDef* m, const upb_DefPool* ext_pool, upb_FieldPathEntry** fields); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#include "upb/port_undef.inc" + +#endif /* UPB_UTIL_REQUIRED_FIELDS_H_ */ diff --git a/CAPI/cpp/grpc/include/zconf.h b/CAPI/cpp/grpc/include/zconf.h new file mode 100644 index 00000000..a5989fb4 --- /dev/null +++ b/CAPI/cpp/grpc/include/zconf.h @@ -0,0 +1,553 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2024 Jean-loup Gailly, Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H +/* #undef Z_PREFIX */ +/* #undef Z_HAVE_UNISTD_H */ + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. 
+ * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". + */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +#define Z_PREFIX_SET + +/* all linked symbols and init macros */ +#define _dist_code z__dist_code +#define _length_code z__length_code +#define _tr_align z__tr_align +#define _tr_flush_bits z__tr_flush_bits +#define _tr_flush_block z__tr_flush_block +#define _tr_init z__tr_init +#define _tr_stored_block z__tr_stored_block +#define _tr_tally z__tr_tally +#define adler32 z_adler32 +#define adler32_combine z_adler32_combine +#define adler32_combine64 z_adler32_combine64 +#define adler32_z z_adler32_z +#ifndef Z_SOLO +#define compress z_compress +#define compress2 z_compress2 +#define compressBound z_compressBound +#endif +#define crc32 z_crc32 +#define crc32_combine z_crc32_combine +#define crc32_combine64 z_crc32_combine64 +#define crc32_combine_gen z_crc32_combine_gen +#define crc32_combine_gen64 z_crc32_combine_gen64 +#define crc32_combine_op z_crc32_combine_op +#define crc32_z z_crc32_z +#define deflate z_deflate +#define deflateBound z_deflateBound +#define deflateCopy z_deflateCopy +#define deflateEnd z_deflateEnd +#define deflateGetDictionary z_deflateGetDictionary +#define deflateInit z_deflateInit +#define deflateInit2 z_deflateInit2 +#define deflateInit2_ z_deflateInit2_ +#define deflateInit_ z_deflateInit_ +#define deflateParams z_deflateParams +#define deflatePending z_deflatePending +#define deflatePrime z_deflatePrime +#define deflateReset z_deflateReset +#define deflateResetKeep z_deflateResetKeep +#define deflateSetDictionary z_deflateSetDictionary +#define deflateSetHeader z_deflateSetHeader +#define deflateTune z_deflateTune +#define deflate_copyright z_deflate_copyright +#define get_crc_table z_get_crc_table +#ifndef Z_SOLO +#define gz_error z_gz_error +#define gz_intmax z_gz_intmax +#define gz_strwinerror z_gz_strwinerror +#define gzbuffer 
z_gzbuffer +#define gzclearerr z_gzclearerr +#define gzclose z_gzclose +#define gzclose_r z_gzclose_r +#define gzclose_w z_gzclose_w +#define gzdirect z_gzdirect +#define gzdopen z_gzdopen +#define gzeof z_gzeof +#define gzerror z_gzerror +#define gzflush z_gzflush +#define gzfread z_gzfread +#define gzfwrite z_gzfwrite +#define gzgetc z_gzgetc +#define gzgetc_ z_gzgetc_ +#define gzgets z_gzgets +#define gzoffset z_gzoffset +#define gzoffset64 z_gzoffset64 +#define gzopen z_gzopen +#define gzopen64 z_gzopen64 +#ifdef _WIN32 +#define gzopen_w z_gzopen_w +#endif +#define gzprintf z_gzprintf +#define gzputc z_gzputc +#define gzputs z_gzputs +#define gzread z_gzread +#define gzrewind z_gzrewind +#define gzseek z_gzseek +#define gzseek64 z_gzseek64 +#define gzsetparams z_gzsetparams +#define gztell z_gztell +#define gztell64 z_gztell64 +#define gzungetc z_gzungetc +#define gzvprintf z_gzvprintf +#define gzwrite z_gzwrite +#endif +#define inflate z_inflate +#define inflateBack z_inflateBack +#define inflateBackEnd z_inflateBackEnd +#define inflateBackInit z_inflateBackInit +#define inflateBackInit_ z_inflateBackInit_ +#define inflateCodesUsed z_inflateCodesUsed +#define inflateCopy z_inflateCopy +#define inflateEnd z_inflateEnd +#define inflateGetDictionary z_inflateGetDictionary +#define inflateGetHeader z_inflateGetHeader +#define inflateInit z_inflateInit +#define inflateInit2 z_inflateInit2 +#define inflateInit2_ z_inflateInit2_ +#define inflateInit_ z_inflateInit_ +#define inflateMark z_inflateMark +#define inflatePrime z_inflatePrime +#define inflateReset z_inflateReset +#define inflateReset2 z_inflateReset2 +#define inflateResetKeep z_inflateResetKeep +#define inflateSetDictionary z_inflateSetDictionary +#define inflateSync z_inflateSync +#define inflateSyncPoint z_inflateSyncPoint +#define inflateUndermine z_inflateUndermine +#define inflateValidate z_inflateValidate +#define inflate_copyright z_inflate_copyright +#define inflate_fast z_inflate_fast +#define 
inflate_table z_inflate_table +#ifndef Z_SOLO +#define uncompress z_uncompress +#define uncompress2 z_uncompress2 +#endif +#define zError z_zError +#ifndef Z_SOLO +#define zcalloc z_zcalloc +#define zcfree z_zcfree +#endif +#define zlibCompileFlags z_zlibCompileFlags +#define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +#define Byte z_Byte +#define Bytef z_Bytef +#define alloc_func z_alloc_func +#define charf z_charf +#define free_func z_free_func +#ifndef Z_SOLO +#define gzFile z_gzFile +#endif +#define gz_header z_gz_header +#define gz_headerp z_gz_headerp +#define in_func z_in_func +#define intf z_intf +#define out_func z_out_func +#define uInt z_uInt +#define uIntf z_uIntf +#define uLong z_uLong +#define uLongf z_uLongf +#define voidp z_voidp +#define voidpc z_voidpc +#define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +#define gz_header_s z_gz_header_s +#define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +#define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +#define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +#define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +#ifndef WIN32 +#define WIN32 +#endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +#if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +#ifndef SYS16BIT +#define SYS16BIT +#endif +#endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). 
+ */ +#ifdef SYS16BIT +#define MAXSEG_64K +#endif +#ifdef MSDOS +#define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +#ifndef STDC +#define STDC +#endif +#if __STDC_VERSION__ >= 199901L +#ifndef STDC99 +#define STDC99 +#endif +#endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +#define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +#define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +#define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +#define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ +#define STDC +#endif + +#ifndef STDC +#ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +#define const /* note: need a more gentle solution here */ +#endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +#define z_const const +#else +#define z_const +#endif + +#ifdef Z_SOLO +#ifdef _WIN64 +typedef unsigned long long z_size_t; +#else +typedef unsigned long z_size_t; +#endif +#else +#define z_longlong long long +#if defined(NO_SIZE_T) +typedef unsigned NO_SIZE_T z_size_t; +#elif defined(STDC) +#include +typedef size_t z_size_t; +#else +typedef unsigned long z_size_t; +#endif +#undef z_longlong +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +#ifdef MAXSEG_64K +#define MAX_MEM_LEVEL 8 +#else +#define MAX_MEM_LEVEL 9 +#endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) 
+ */ +#ifndef MAX_WBITS +#define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus about 7 kilobytes + for small objects. +*/ + +/* Type declarations */ + +#ifndef OF /* function prototypes */ +#ifdef STDC +#define OF(args) args +#else +#define OF(args) () +#endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +#if defined(M_I86SM) || defined(M_I86MM) +/* MSC small or medium model */ +#define SMALL_MEDIUM +#ifdef _MSC_VER +#define FAR _far +#else +#define FAR far +#endif +#endif +#if (defined(__SMALL__) || defined(__MEDIUM__)) +/* Turbo C small or medium model */ +#define SMALL_MEDIUM +#ifdef __BORLANDC__ +#define FAR _far +#else +#define FAR far +#endif +#endif +#endif + +#if defined(WINDOWS) || defined(WIN32) +/* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. 
+ */ +#if 0 +#if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +#ifdef ZLIB_INTERNAL +#define ZEXTERN extern __declspec(dllexport) +#else +#define ZEXTERN extern __declspec(dllimport) +#endif +#endif +#endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +#ifdef ZLIB_WINAPI +#ifdef FAR +#undef FAR +#endif +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +/* No need for _export, use ZLIB.DEF instead. */ +/* For complete Windows compatibility, use WINAPI, not __stdcall. */ +#define ZEXPORT WINAPI +#ifdef WIN32 +#define ZEXPORTVA WINAPIV +#else +#define ZEXPORTVA FAR CDECL +#endif +#endif +#endif + +#if defined(__BEOS__) +#if 0 +#ifdef ZLIB_INTERNAL +#define ZEXPORT __declspec(dllexport) +#define ZEXPORTVA __declspec(dllexport) +#else +#define ZEXPORT __declspec(dllimport) +#define ZEXPORTVA __declspec(dllimport) +#endif +#endif +#endif + +#ifndef ZEXTERN +#define ZEXTERN extern +#endif +#ifndef ZEXPORT +#define ZEXPORT +#endif +#ifndef ZEXPORTVA +#define ZEXPORTVA +#endif + +#ifndef FAR +#define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM +/* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +#define Bytef Byte FAR +#else +typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC +typedef void const* voidpc; +typedef void FAR* voidpf; +typedef void* voidp; +#else +typedef Byte const* voidpc; +typedef Byte FAR* voidpf; +typedef Byte* voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +#include +#if (UINT_MAX == 0xffffffffUL) +#define Z_U4 unsigned +#elif (ULONG_MAX == 
0xffffffffUL) +#define Z_U4 unsigned long +#elif (USHRT_MAX == 0xffffffffUL) +#define Z_U4 unsigned short +#endif +#endif + +#ifdef Z_U4 +typedef Z_U4 z_crc_t; +#else +typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +#if ~(~HAVE_UNISTD_H + 0) == 0 && ~(~HAVE_UNISTD_H + 1) == 1 +#define Z_HAVE_UNISTD_H +#elif HAVE_UNISTD_H != 0 +#define Z_HAVE_UNISTD_H +#endif +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +#if ~(~HAVE_STDARG_H + 0) == 0 && ~(~HAVE_STDARG_H + 1) == 1 +#define Z_HAVE_STDARG_H +#elif HAVE_STDARG_H != 0 +#define Z_HAVE_STDARG_H +#endif +#endif + +#ifdef STDC +#ifndef Z_SOLO +#include /* for off_t */ +#endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +#ifndef Z_SOLO +#include /* for va_list */ +#endif +#endif + +#ifdef _WIN32 +#ifndef Z_SOLO +#include /* for wchar_t */ +#endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +#undef _LARGEFILE64_SOURCE +#endif + +#ifndef Z_HAVE_UNISTD_H +#ifdef __WATCOMC__ +#define Z_HAVE_UNISTD_H +#endif +#endif +#ifndef Z_HAVE_UNISTD_H +#if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32) +#define Z_HAVE_UNISTD_H +#endif +#endif +#ifndef Z_SOLO +#if defined(Z_HAVE_UNISTD_H) +#include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +#ifdef VMS +#include /* for off_t */ +#endif +#ifndef z_off_t +#define z_off_t off_t +#endif +#endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE - 0 +#define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +#define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && 
_FILE_OFFSET_BITS - 0 == 64 && defined(Z_LFS64) +#define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +#define SEEK_SET 0 /* Seek from beginning of file. */ +#define SEEK_CUR 1 /* Seek from current position. */ +#define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +#define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +#define z_off64_t off64_t +#else +#if defined(_WIN32) && !defined(__GNUC__) +#define z_off64_t __int64 +#else +#define z_off64_t z_off_t +#endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) +#pragma map(deflateInit_, "DEIN") +#pragma map(deflateInit2_, "DEIN2") +#pragma map(deflateEnd, "DEEND") +#pragma map(deflateBound, "DEBND") +#pragma map(inflateInit_, "ININ") +#pragma map(inflateInit2_, "ININ2") +#pragma map(inflateEnd, "INEND") +#pragma map(inflateSync, "INSY") +#pragma map(inflateSetDictionary, "INSEDI") +#pragma map(compressBound, "CMBND") +#pragma map(inflate_table, "INTABL") +#pragma map(inflate_fast, "INFA") +#pragma map(inflate_copyright, "INCOPY") +#endif + +#endif /* ZCONF_H */ diff --git a/CAPI/cpp/grpc/include/zlib.h b/CAPI/cpp/grpc/include/zlib.h new file mode 100644 index 00000000..2910bbb7 --- /dev/null +++ b/CAPI/cpp/grpc/include/zlib.h @@ -0,0 +1,1878 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.3.1, January 22nd, 2024 + + Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. 
The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). +*/ + +#ifndef ZLIB_H +#define ZLIB_H + +#include "zconf.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define ZLIB_VERSION "1.3.1" +#define ZLIB_VERNUM 0x1310 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 3 +#define ZLIB_VER_REVISION 1 +#define ZLIB_VER_SUBREVISION 0 + + /* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. + + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output + (providing more output space) before each call. + + The compressed data format used by default by the in-memory functions is + the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped + around a deflate stream, which is itself documented in RFC 1951. 
+ + The library also supports reading and writing files in gzip (.gz) format + with an interface similar to that of stdio using the functions that start + with "gz". The gzip format is different from the zlib format. gzip is a + gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. + + This library can optionally read and write gzip and raw deflate streams in + memory as well. + + The zlib format was designed to be compact and fast for use in memory + and on communications channels. The gzip format was designed for single- + file compression on file systems, has a larger header than zlib to maintain + directory information, and uses a different, slower check method than zlib. + + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in the case of corrupted input. + */ + + typedef voidpf (*alloc_func)(voidpf opaque, uInt items, uInt size); + typedef void (*free_func)(voidpf opaque, voidpf address); + + struct internal_state; + + typedef struct z_stream_s + { + z_const Bytef* next_in; /* next input byte */ + uInt avail_in; /* number of bytes available at next_in */ + uLong total_in; /* total number of input bytes read so far */ + + Bytef* next_out; /* next output byte will go here */ + uInt avail_out; /* remaining free space at next_out */ + uLong total_out; /* total number of bytes output so far */ + + z_const char* msg; /* last error message, NULL if no error */ + struct internal_state FAR* state; /* not visible by applications */ + + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree; /* used to free the internal state */ + voidpf opaque; /* private data object passed to zalloc and zfree */ + + int data_type; /* best guess about the data type: binary or text + for deflate, or the decoding state for inflate */ + uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */ + uLong reserved; /* reserved for future use 
*/ + } z_stream; + + typedef z_stream FAR* z_streamp; + + /* + gzip header information passed to and from zlib routines. See RFC 1952 + for more details on the meanings of these fields. + */ + typedef struct gz_header_s + { + int text; /* true if compressed data believed to be text */ + uLong time; /* modification time */ + int xflags; /* extra flags (not used when writing a gzip file) */ + int os; /* operating system */ + Bytef* extra; /* pointer to extra field or Z_NULL if none */ + uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ + uInt extra_max; /* space at extra (only when reading header) */ + Bytef* name; /* pointer to zero-terminated file name or Z_NULL */ + uInt name_max; /* space at name (only when reading header) */ + Bytef* comment; /* pointer to zero-terminated comment or Z_NULL */ + uInt comm_max; /* space at comment (only when reading header) */ + int hcrc; /* true if there was or will be a header crc */ + int done; /* true when done reading gzip header (not used + when writing a gzip file) */ + } gz_header; + + typedef gz_header FAR* gz_headerp; + + /* + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. + + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return Z_NULL if there is not enough memory for the object. + If zlib is used in a multi-threaded application, zalloc and zfree must be + thread safe. In that case, zlib is thread-safe. 
When zalloc and zfree are + Z_NULL on entry to the initialization function, they are set to internal + routines that use the standard library functions malloc() and free(). + + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use by the decompressor (particularly + if the decompressor wants to decompress everything in a single step). + */ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 +#define Z_SYNC_FLUSH 2 +#define Z_FULL_FLUSH 3 +#define Z_FINISH 4 +#define Z_BLOCK 5 +#define Z_TREES 6 + /* Allowed flush values; see deflate() and inflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) + /* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
+ */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) + /* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_RLE 3 +#define Z_FIXED 4 +#define Z_DEFAULT_STRATEGY 0 + /* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_TEXT 1 +#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ +#define Z_UNKNOWN 2 + /* Possible values of the data_type field for deflate() */ + +#define Z_DEFLATED 8 + /* The deflate compression method (the only one supported in this version) */ + +#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ + +#define zlib_version zlibVersion() + /* for compatibility with versions < 1.0.2 */ + + /* basic functions */ + + ZEXTERN const char* ZEXPORT zlibVersion(void); + /* The application can compare zlibVersion and ZLIB_VERSION for consistency. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. + */ + + /* + ZEXTERN int ZEXPORT deflateInit(z_streamp strm, int level); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. total_in, total_out, adler, and msg are initialized. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). 
+ + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). + */ + + ZEXTERN int ZEXPORT deflate(z_streamp strm, int flush); + /* + deflate compresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. deflate performs one or both of the + following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary. Some output may be provided even if + flush is zero. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). 
If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. See deflatePending(), + which can be used if desired to determine whether or not there is more output + in that case. + + Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to + decide how much data to accumulate before producing output, in order to + maximize compression. + + If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + flushed to the output buffer and the output is aligned on a byte boundary, so + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. + This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed + codes block. + + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. 
It may need to wait for the next + block to be emitted. This is for advanced applications that need to control + the emission of deflate blocks. + + If flush is set to Z_FULL_FLUSH, all output is flushed as with + Z_SYNC_FLUSH, and the compression state is reset so that decompression can + restart from this point if previous compressed data has been damaged or if + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + compression. + + If deflate returns with avail_out == 0, this function must be called again + with the same value of the flush parameter and more output space (updated + avail_out), until the flush is complete (deflate returns with non-zero + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out is greater than six when the flush marker begins, in order to avoid + repeated flush markers upon calling deflate() again when avail_out == 0. + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this + function must be called again with Z_FINISH and more output space (updated + avail_out) but no more input data, until it returns with Z_STREAM_END or an + error. After deflate has returned Z_STREAM_END, the only possible operations + on the stream are deflateReset or deflateEnd. + + Z_FINISH can be used in the first deflate call after deflateInit if all the + compression is to be done in a single step. In order to complete in one + call, avail_out must be at least the value returned by deflateBound (see + below). Then deflate is guaranteed to return Z_STREAM_END. If not enough + output space is provided, deflate will not return Z_STREAM_END, and it must + be called again as described above. + + deflate() sets strm->adler to the Adler-32 checksum of all input read + so far (that is, total_in bytes). 
If a gzip stream is being generated, then + strm->adler will be the CRC-32 checksum of the input read so far. (See + deflateInit2 below.) + + deflate() may update strm->data_type if it can make a good guess about + the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is + considered binary. This field is only for information purposes and does not + affect the compression algorithm in any manner. + + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was Z_NULL or the state was inadvertently written over + by the application), or Z_BUF_ERROR if no progress is possible (for example + avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and + deflate() can be called again with more input and more output space to + continue compressing. + */ + + ZEXTERN int ZEXPORT deflateEnd(z_streamp strm); + /* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be + deallocated). + */ + + /* + ZEXTERN int ZEXPORT inflateInit(z_streamp strm); + + Initializes the internal stream state for decompression. The fields + next_in, avail_in, zalloc, zfree and opaque must be initialized before by + the caller. In the current version of inflate, the provided input is not + read or consumed. The allocation of a sliding window will be deferred to + the first call of inflate (if the decompression does not complete on the + first call). 
If zalloc and zfree are set to Z_NULL, inflateInit updates + them to use default allocation functions. total_in, total_out, adler, and + msg are initialized. + + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression. + Actual decompression will be done by inflate(). So next_in, and avail_in, + next_out, and avail_out are unused and unchanged. The current + implementation of inflateInit() does not process any header information -- + that is deferred until inflate() is called. + */ + + ZEXTERN int ZEXPORT inflate(z_streamp strm, int flush); + /* + inflate decompresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. inflate performs one or both of the + following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), then next_in and avail_in are updated + accordingly, and processing will resume at this point for the next call of + inflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). 
+ + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. If the + caller of inflate() does not provide both available input and available + output space, it is possible that there will be no progress made. The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. + + The Z_BLOCK option assists in appending to or combining deflate streams. + To assist in this, on return inflate() always sets strm->data_type to the + number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. 
The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. + + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. + + inflate() should normally be called until it returns Z_STREAM_END or an + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. 
+ + In this implementation, inflate() always flushes as much output as + possible to the output buffer, and always uses the faster approach on the + first call. So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. + + If a preset dictionary is needed after this call (see inflateSetDictionary + below), inflate sets strm->adler to the Adler-32 checksum of the dictionary + chosen by the compressor and returns Z_NEED_DICT; otherwise it sets + strm->adler to the Adler-32 checksum of all output produced so far (that is, + total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described + below. At the end of the stream, inflate() checks that its computed Adler-32 + checksum is equal to that saved by the compressor and returns Z_STREAM_END + only if the checksum is correct. + + inflate() can decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically, if requested when + initializing with inflateInit2(). Any information contained in the gzip + header is not retained unless inflateGetHeader() is used. When processing + gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output + produced so far. The CRC-32 is checked against the gzip trailer, as is the + uncompressed length, modulo 2^32. 
+ + inflate() returns Z_OK if some progress has been made (more input processed + or more output produced), Z_STREAM_END if the end of the compressed data has + been reached and all uncompressed output has been produced, Z_NEED_DICT if a + preset dictionary is needed at this point, Z_DATA_ERROR if the input data was + corrupted (input stream not conforming to the zlib format or incorrect check + value, in which case strm->msg points to a string with a more specific + error), Z_STREAM_ERROR if the stream structure was inconsistent (for example + next_in or next_out was Z_NULL, or the state was inadvertently written over + by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR + if no progress was possible or if there was not enough room in the output + buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + inflate() can be called again with more input and more output space to + continue decompressing. If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is to be attempted. + */ + + ZEXTERN int ZEXPORT inflateEnd(z_streamp strm); + /* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state + was inconsistent. + */ + + /* Advanced functions */ + + /* + The following functions are needed only in some special applications. + */ + + /* + ZEXTERN int ZEXPORT deflateInit2(z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy); + + This is another version of deflateInit with more compression options. The + fields zalloc, zfree and opaque must be initialized before by the caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. 
+ + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if + deflateInit is used instead. + + For the current implementation of deflate(), a windowBits value of 8 (a + window size of 256 bytes) is not supported. As a result, a request for 8 + will result in 9 (a 512-byte window). In that case, providing 8 to + inflateInit2() will result in an error when the zlib header with 9 is + checked against the initialization of inflate(). The remedy is to not use 8 + with deflateInit2() with this initialization, or at least in that case use 9 + with inflateInit2(). + + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data + with no zlib header or trailer, and will not compute a check value. + + windowBits can also be greater than 15 for optional gzip encoding. Add + 16 to windowBits to write a simple gzip header and trailer around the + compressed data instead of a zlib wrapper. The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to the appropriate value, + if the operating system was determined at compile time. If a gzip stream is + being written, strm->adler is a CRC-32 instead of an Adler-32. + + For raw deflate or gzip encoding, a request for a 256-byte window is + rejected as invalid, since only the zlib header provides a means of + transmitting the window size to the decompressor. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. 
The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. + + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match), or Z_RLE to limit match distances to one (run-length + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman + coding and less string matching; it is somewhat intermediate between + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. deflateInit2 does not perform any + compression: this will be done by deflate(). + */ + + ZEXTERN int ZEXPORT deflateSetDictionary(z_streamp strm, const Bytef* dictionary, uInt dictLength); + /* + Initializes the compression dictionary from the given byte sequence + without producing any compressed output. When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. 
When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). + + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary. Using a + dictionary is most useful when the data to be compressed is short and can be + predicted with good accuracy; the data can then be compressed better than + with the default empty dictionary. + + Depending on the size of the compression data structures selected by + deflateInit or deflateInit2, a part of the dictionary may in effect be + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. + + Upon return of this function, strm->adler is set to the Adler-32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The Adler-32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) If a raw deflate was requested, then the + Adler-32 value is not computed and strm->adler is not set. + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (e.g. 
dictionary being Z_NULL) or the stream state is + inconsistent (for example if deflate has already been called for this stream + or if not at a block boundary for raw deflate). deflateSetDictionary does + not perform any compression: this will be done by deflate(). + */ + + ZEXTERN int ZEXPORT deflateGetDictionary(z_streamp strm, Bytef* dictionary, uInt* dictLength); + /* + Returns the sliding dictionary being maintained by deflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If deflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similarly, if dictLength is Z_NULL, then it is not set. + + deflateGetDictionary() may return a length less than the window size, even + when more than the window size in input has been provided. It may return up + to 258 bytes less in that case, due to how zlib's implementation of deflate + manages the sliding window and lookahead for matches, where matches can be + up to 258 bytes long. If the application needs the last window-size bytes of + input, then that would need to be saved by the application outside of zlib. + + deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. + */ + + ZEXTERN int ZEXPORT deflateCopy(z_streamp dest, z_streamp source); + /* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. 
+ + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. + */ + + ZEXTERN int ZEXPORT deflateReset(z_streamp strm); + /* + This function is equivalent to deflateEnd followed by deflateInit, but + does not free and reallocate the internal compression state. The stream + will leave the compression level and any other attributes that may have been + set unchanged. total_in, total_out, adler, and msg are initialized. + + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). + */ + + ZEXTERN int ZEXPORT deflateParams(z_streamp strm, int level, int strategy); + /* + Dynamically update the compression level and compression strategy. The + interpretation of level and strategy is as in deflateInit2(). This can be + used to switch between compression and straight copy of the input data, or + to switch to a different kind of input data requiring a different strategy. + If the compression approach (which is a function of the level) or the + strategy is changed, and if there have been any deflate() calls since the + state was initialized or reset, then the input available so far is + compressed with the old level and strategy using deflate(strm, Z_BLOCK). + There are three approaches for the compression levels 0, 1..3, and 4..9 + respectively. The new level and strategy will take effect at the next call + of deflate(). + + If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does + not have enough output space to complete, then the parameter change will not + take effect. In this case, deflateParams() can be called again with the + same parameters and more output space to try again. 
+ + In order to assure a change in the parameters on the first try, the + deflate stream should be flushed using deflate() with Z_BLOCK or other flush + request until strm.avail_out is not zero, before calling deflateParams(). + Then no more input data should be provided before the deflateParams() call. + If this is done, the old level and strategy will be applied to the data + compressed before deflateParams(), and the new level and strategy will be + applied to the data compressed after deflateParams(). + + deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream + state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if + there was not enough output space to complete the compression of the + available input data before a change in the strategy or approach. Note that + in the case of a Z_BUF_ERROR, the parameters are not changed. A return + value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be + retried with more output space. + */ + + ZEXTERN int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy, int nice_length, int max_chain); + /* + Fine tune deflate's internal compression parameters. This should only be + used by someone who understands the algorithm used by zlib's deflate for + searching for the best matching string, and even then only by the most + fanatic optimizer trying to squeeze out the last compressed bit for their + specific input data. Read the deflate.c source code for the meaning of the + max_lazy, good_length, nice_length, and max_chain parameters. + + deflateTune() can be called after deflateInit() or deflateInit2(), and + returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. + */ + + ZEXTERN uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen); + /* + deflateBound() returns an upper bound on the compressed size after + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. 
This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. + */ + + ZEXTERN int ZEXPORT deflatePending(z_streamp strm, unsigned* pending, int* bits); + /* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. + + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + + ZEXTERN int ZEXPORT deflatePrime(z_streamp strm, int bits, int value); + /* + deflatePrime() inserts bits in the deflate output stream. The intent + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. + + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. 
+ */ + + ZEXTERN int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head); + /* + deflateSetHeader() provides gzip header information for when a gzip + stream is requested by deflateInit2(). deflateSetHeader() may be called + after deflateInit2() or deflateReset() and before the first call of + deflate(). The text, time, os, extra field, name, and comment information + in the provided gz_header structure are written to the gzip header (xflag is + ignored -- the extra flags are set according to the compression level). The + caller must assure that, if not Z_NULL, name and comment are terminated with + a zero byte, and that if extra is not Z_NULL, that extra_len bytes are + available there. If hcrc is true, a gzip header crc is included. Note that + the current versions of the command-line version of gzip (up through version + 1.3.x) do not support header crc's, and will report that it is a "multi-part + gzip file" and give up. + + If deflateSetHeader is not used, the default gzip header has text false, + the time set to zero, and os set to the current operating system, with no + extra, name, or comment fields. The gzip header is returned to the default + state by deflateReset(). + + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + + /* + ZEXTERN int ZEXPORT inflateInit2(z_streamp strm, + int windowBits); + + This is another version of inflateInit with an extra parameter. The + fields next_in, avail_in, zalloc, zfree and opaque must be initialized + before by the caller. + + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). It should be in the range 8..15 for + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value + provided to deflateInit2() while compressing, or it must be equal to 15 if + deflateInit2() was not used. 
If a compressed stream with a larger window + size is given as input, inflate() will return with the error code + Z_DATA_ERROR instead of trying to allocate a larger window. + + windowBits can also be zero to request that inflate use the window size in + the zlib header of the compressed stream. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. inflate() will then process raw deflate data, + not looking for a zlib or gzip header, not generating a check value, and not + looking for any check values for comparison at the end of the stream. This + is for use with other formats that use the deflate compressed data format + such as zip. Those formats provide their own check values. If a custom + format is developed using the raw deflate format for compressed data, it is + recommended that a check value such as an Adler-32 or a CRC-32 be applied to + the uncompressed data as is done in the zlib, gzip, and zip formats. For + most applications, the zlib format should be used as is. Note that comments + above on the use in deflateInit2() applies to the magnitude of windowBits. + + windowBits can also be greater than 15 for optional gzip decoding. Add + 32 to windowBits to enable zlib and gzip decoding with automatic header + detection, or add 16 to decode only the gzip format (the zlib format will + return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a + CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see + below), inflate() will *not* automatically decode concatenated gzip members. + inflate() will return Z_STREAM_END at the end of the gzip member. The state + would need to be reset to continue decoding a subsequent gzip member. This + *must* be done if there is more data after a gzip member, in order for the + decompression to be compliant with the gzip standard (RFC 1952). 
+ + inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit2 does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit2() does not process any header information -- that is + deferred until inflate() is called. + */ + + ZEXTERN int ZEXPORT inflateSetDictionary(z_streamp strm, const Bytef* dictionary, uInt dictLength); + /* + Initializes the decompression dictionary from the given uncompressed byte + sequence. This function must be called immediately after a call of inflate, + if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + can be determined from the Adler-32 value returned by that call of inflate. + The compressor and decompressor must use exactly the same dictionary (see + deflateSetDictionary). For raw inflate, this function can be called at any + time to set the dictionary. If the provided dictionary is smaller than the + window and there is already data in the window, then the provided dictionary + will amend what's there. The application must insure that the dictionary + that was used for compression is provided. + + inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the + expected one (incorrect Adler-32 value). inflateSetDictionary does not + perform any decompression: this will be done by subsequent calls of + inflate(). 
+ */ + + ZEXTERN int ZEXPORT inflateGetDictionary(z_streamp strm, Bytef* dictionary, uInt* dictLength); + /* + Returns the sliding dictionary being maintained by inflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If inflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similarly, if dictLength is Z_NULL, then it is not set. + + inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. + */ + + ZEXTERN int ZEXPORT inflateSync(z_streamp strm); + /* + Skips invalid compressed data until a possible full flush point (see above + for the description of deflate with Z_FULL_FLUSH) can be found, or until all + available input is skipped. No output is provided. + + inflateSync searches for a 00 00 FF FF pattern in the compressed data. + All full flush points have this pattern, but not all occurrences of this + pattern are full flush points. + + inflateSync returns Z_OK if a possible full flush point has been found, + Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point + has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. + In the success case, the application may save the current value of total_in + which indicates where valid compressed data was found. In the error case, + the application may repeatedly call inflateSync, providing more input each + time, until success or end of the input data. + */ + + ZEXTERN int ZEXPORT inflateCopy(z_streamp dest, z_streamp source); + /* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when randomly accessing a large stream. 
The + first pass through the stream can periodically record the inflate state, + allowing restarting inflate at those points when randomly accessing the + stream. + + inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. + */ + + ZEXTERN int ZEXPORT inflateReset(z_streamp strm); + /* + This function is equivalent to inflateEnd followed by inflateInit, + but does not free and reallocate the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. + total_in, total_out, adler, and msg are initialized. + + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). + */ + + ZEXTERN int ZEXPORT inflateReset2(z_streamp strm, int windowBits); + /* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. If the window size is changed, then the + memory allocated for the window is freed, and the window will be reallocated + by inflate() if needed. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. + */ + + ZEXTERN int ZEXPORT inflatePrime(z_streamp strm, int bits, int value); + /* + This function inserts bits in the inflate input stream. The intent is + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). 
bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. + + If bits is negative, then the input stream bit buffer is emptied. Then + inflatePrime() can be called again to put bits in the buffer. This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. + + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + + ZEXTERN long ZEXPORT inflateMark(z_streamp strm); + /* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. + If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. 
+ + inflateMark returns the value noted above, or -65536 if the provided + source stream state was inconsistent. + */ + + ZEXTERN int ZEXPORT inflateGetHeader(z_streamp strm, gz_headerp head); + /* + inflateGetHeader() requests that gzip header information be stored in the + provided gz_header structure. inflateGetHeader() may be called after + inflateInit2() or inflateReset(), and before the first call of inflate(). + As inflate() processes the gzip stream, head->done is zero until the header + is completed, at which time head->done is set to one. If a zlib stream is + being decoded, then head->done is set to -1 to indicate that there will be + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. + + The text, time, xflags, and os fields are filled in with the gzip header + contents. hcrc is set to true if there is a header CRC. (The header CRC + was valid if done is set to one.) If extra is not Z_NULL, then extra_max + contains the maximum number of bytes to write to extra. Once done is true, + extra_len contains the actual extra field length, and extra contains the + extra field, or that field truncated if extra_max is less than extra_len. + If name is not Z_NULL, then up to name_max characters are written there, + terminated with a zero unless the length is greater than name_max. If + comment is not Z_NULL, then up to comm_max characters are written there, + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its + absence. This allows the use of deflateSetHeader() with the returned + structure to duplicate the header. 
However if those fields are set to + allocated memory, then the application will need to save those pointers + elsewhere so that they can be eventually freed. + + If inflateGetHeader is not used, then the header information is simply + discarded. The header is always checked for validity, including the header + CRC if present. inflateReset() will reset the process to discard the header + information. The application would need to call inflateGetHeader() again to + retrieve the header from the next gzip stream. + + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + + /* + ZEXTERN int ZEXPORT inflateBackInit(z_streamp strm, int windowBits, + unsigned char FAR *window); + + Initialize the internal stream state for decompression using inflateBack() + calls. The fields zalloc, zfree and opaque in strm must be initialized + before the call. If zalloc and zfree are Z_NULL, then the default library- + derived memory allocation routines are used. windowBits is the base two + logarithm of the window size, in the range 8..15. window is a caller + supplied buffer of that size. Except for special applications where it is + assured that deflate was used with small window sizes, windowBits must be 15 + and a 32K byte window must be supplied to be able to decompress general + deflate streams. + + See inflateBack() for the usage of these routines. + + inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. 
+ */ + + typedef unsigned (*in_func)(void FAR*, z_const unsigned char FAR* FAR*); + typedef int (*out_func)(void FAR*, unsigned char FAR*, unsigned); + + ZEXTERN int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR* in_desc, out_func out, void FAR* out_desc); + /* + inflateBack() does a raw inflate with a single call using a call-back + interface for input and output. This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. + + inflateBackInit() must be called first to allocate the internal state + and to initialize the state with the user-provided window buffer. + inflateBack() may then be used multiple times to inflate a complete, raw + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. + + A raw deflate stream is one with no zlib or gzip header or trailer. + This routine would normally be used in a utility that reads zip or gzip + files and writes out uncompressed files. The utility would decode the + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. This is different from the default + behavior of inflate(), which expects a zlib header and trailer around the + deflate stream. + + inflateBack() uses two subroutines supplied by the caller that are then + called by inflateBack() for input and output. inflateBack() calls those + routines until it reads a complete deflate stream and writes out all of the + uncompressed data, or until it encounters an error. The function's + parameters and return types are defined above in the in_func and out_func + typedefs. 
inflateBack() will call in(in_desc, &buf) which should return the + number of bytes of provided input, and a pointer to that input in buf. If + there is no input available, in() must return zero -- buf is ignored in that + case -- and inflateBack() will return a buffer error. inflateBack() will + call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. + out() should return zero on success, or non-zero on failure. If out() + returns non-zero, inflateBack() will return with an error. Neither in() nor + out() are permitted to change the contents of the window provided to + inflateBackInit(), which is also the buffer that out() uses to write from. + The length written by out() will be at most the window size. Any non-zero + amount of input may be provided by in(). + + For convenience, inflateBack() can be provided input on the first call by + setting strm->next_in and strm->avail_in. If that input is exhausted, then + in() will be called. Therefore strm->next_in must be initialized before + calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called + immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in + must also be initialized, and then if strm->avail_in is not zero, input will + initially be taken from strm->next_in[0 .. strm->avail_in - 1]. + + The in_desc and out_desc parameters of inflateBack() is passed as the + first parameter of in() and out() respectively when they are called. These + descriptors can be optionally used to pass any information that the caller- + supplied in() and out() functions need to do their job. + + On return, inflateBack() will set strm->next_in and strm->avail_in to + pass back any unused input that was provided by the last in() call. 
The + return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR + if in() or out() returned an error, Z_DATA_ERROR if there was a format error + in the deflate stream (in which case strm->msg is set to indicate the nature + of the error), or Z_STREAM_ERROR if the stream was not properly initialized. + In the case of Z_BUF_ERROR, an input or output error can be distinguished + using strm->next_in which will be Z_NULL only if in() returned an error. If + strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning + non-zero. (in() will always be called before out(), so strm->next_in is + assured to be defined if out() returns non-zero.) Note that inflateBack() + cannot return Z_OK. + */ + + ZEXTERN int ZEXPORT inflateBackEnd(z_streamp strm); + /* + All memory allocated by inflateBackInit() is freed. + + inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream + state was inconsistent. + */ + + ZEXTERN uLong ZEXPORT zlibCompileFlags(void); + /* Return flags indicating compile-time options. 
+ + Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: + 1.0: size of uInt + 3.2: size of uLong + 5.4: size of voidpf (pointer) + 7.6: size of z_off_t + + Compiler, assembler, and debug options: + 8: ZLIB_DEBUG + 9: ASMV or ASMINF -- use ASM code + 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention + 11: 0 (reserved) + + One-time table building (smaller code, but not thread-safe if true): + 12: BUILDFIXED -- build static block decoding tables when needed + 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed + 14,15: 0 (reserved) + + Library content (indicates missing functionality): + 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking + deflate code when not needed) + 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect + and decode gzip streams (to avoid linking crc code) + 18-19: 0 (reserved) + + Operation variations (changes in library functionality): + 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate + 21: FASTEST -- deflate algorithm with only one, lowest compression level + 22,23: 0 (reserved) + + The sprintf variant used by gzprintf (zero is best): + 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format + 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! + 26: 0 = returns value, 1 = void -- 1 means inferred string length returned + + Remainder: + 27-31: 0 (reserved) + */ + +#ifndef Z_SOLO + + /* utility functions */ + + /* + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. + */ + + ZEXTERN int ZEXPORT compress(Bytef* dest, uLongf* destLen, const Bytef* source, uLong sourceLen); + /* + Compresses the source buffer into the destination buffer. 
sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed data. compress() is equivalent to compress2() with a level + parameter of Z_DEFAULT_COMPRESSION. + + compress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer. + */ + + ZEXTERN int ZEXPORT compress2(Bytef* dest, uLongf* destLen, const Bytef* source, uLong sourceLen, int level); + /* + Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed data. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. + */ + + ZEXTERN uLong ZEXPORT compressBound(uLong sourceLen); + /* + compressBound() returns an upper bound on the compressed size after + compress() or compress2() on sourceLen bytes. It would be used before a + compress() or compress2() call to allocate the destination buffer. + */ + + ZEXTERN int ZEXPORT uncompress(Bytef* dest, uLongf* destLen, const Bytef* source, uLong sourceLen); + /* + Decompresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. 
(The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, destLen + is the actual size of the uncompressed data. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. + */ + + ZEXTERN int ZEXPORT uncompress2(Bytef* dest, uLongf* destLen, const Bytef* source, uLong* sourceLen); + /* + Same as uncompress, except that sourceLen is a pointer, where the + length of the source is *sourceLen. On return, *sourceLen is the number of + source bytes consumed. + */ + + /* gzip file access functions */ + + /* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. + */ + + typedef struct gzFile_s* gzFile; /* semi-opaque gzip file descriptor */ + + /* + ZEXTERN gzFile ZEXPORT gzopen(const char *path, const char *mode); + + Open the gzip (.gz) file at path for reading and decompressing, or + compressing and writing. The mode parameter is as in fopen ("rb" or "wb") + but can also include a compression level ("wb9") or a strategy: 'f' for + filtered data as in "wb6f", 'h' for Huffman-only compression as in "wb1h", + 'R' for run-length encoding as in "wb1R", or 'F' for fixed code compression + as in "wb9F". (See the description of deflateInit2 for more information + about the strategy parameter.) 'T' will request transparent writing or + appending with no compression and not using the gzip format. 
+ + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. + + gzopen can be used to read a file which is not in gzip format; in this + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. + + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. + */ + + ZEXTERN gzFile ZEXPORT gzdopen(int fd, const char* mode); + /* + Associate a gzFile with the file descriptor fd. File descriptors are + obtained from calls like open, dup, creat, pipe or fileno (if the file has + been previously opened with fopen). The mode parameter is as in gzopen. + + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. 
The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). + */ + + ZEXTERN int ZEXPORT gzbuffer(gzFile file, unsigned size); + /* + Set the internal buffer size used by this library's functions for file to + size. The default buffer size is 8192 bytes. This function must be called + after gzopen() or gzdopen(), and before any other calls that read or write + the file. The buffer memory allocation is always deferred to the first read + or write. Three times that size in buffer space is allocated. A larger + buffer size of, for example, 64K or 128K bytes will noticeably increase the + speed of decompression (reading). + + The new buffer size also affects the maximum length for gzprintf(). + + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. + */ + + ZEXTERN int ZEXPORT gzsetparams(gzFile file, int level, int strategy); + /* + Dynamically update the compression level and strategy for file. See the + description of deflateInit2 for the meaning of these parameters. Previously + provided data is flushed before applying the parameter changes. + + gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not + opened for writing, Z_ERRNO if there is an error writing the flushed data, + or Z_MEM_ERROR if there is a memory allocation error. 
+ */ + + ZEXTERN int ZEXPORT gzread(gzFile file, voidp buf, unsigned len); + /* + Read and decompress up to len uncompressed bytes from file into buf. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. + + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). + If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). + + gzread can be used to read a gzip file that is being concurrently written. + Upon reaching the end of the input, gzread will return with the available + data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then + gzclearerr can be used to clear the end of file indicator in order to permit + gzread to be tried again. Z_OK indicates that a gzip stream was completed + on the last gzread. Z_BUF_ERROR indicates that the input file ended in the + middle of a gzip stream. Note that gzread does not return -1 in the event + of an incomplete gzip stream. This error is deferred until gzclose(), which + will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip + stream. Alternatively, gzerror can be used before gzclose to detect this + case. + + gzread returns the number of uncompressed bytes actually read, less than + len for end of file, or -1 for error. If len is too large to fit in an int, + then nothing is read, -1 is returned, and the error state is set to + Z_STREAM_ERROR. + */ + + ZEXTERN z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems, gzFile file); + /* + Read and decompress up to nitems items of size size from file into buf, + otherwise operating as gzread() does. This duplicates the interface of + stdio's fread(), with size_t request and return types. 
If the library + defines size_t, then z_size_t is identical to size_t. If not, then z_size_t + is an unsigned integer type that can contain a pointer. + + gzfread() returns the number of full items read of size size, or zero if + the end of the file was reached and a full item could not be read, or if + there was an error. gzerror() must be consulted if zero is returned in + order to determine if there was an error. If the multiplication of size and + nitems overflows, i.e. the product does not fit in a z_size_t, then nothing + is read, zero is returned, and the error state is set to Z_STREAM_ERROR. + + In the event that the end of file is reached and only a partial item is + available at the end, i.e. the remaining uncompressed data length is not a + multiple of size, then the final partial item is nevertheless read into buf + and the end-of-file flag is set. The length of the partial item read is not + provided, but could be inferred from the result of gztell(). This behavior + is the same as the behavior of fread() implementations in common libraries, + but it prevents the direct use of gzfread() to read a concurrently written + file, resetting and retrying on end-of-file, when size is not 1. + */ + + ZEXTERN int ZEXPORT gzwrite(gzFile file, voidpc buf, unsigned len); + /* + Compress and write the len uncompressed bytes at buf to file. gzwrite + returns the number of uncompressed bytes written or 0 in case of error. + */ + + ZEXTERN z_size_t ZEXPORT gzfwrite(voidpc buf, z_size_t size, z_size_t nitems, gzFile file); + /* + Compress and write nitems items of size size from buf to file, duplicating + the interface of stdio's fwrite(), with size_t request and return types. If + the library defines size_t, then z_size_t is identical to size_t. If not, + then z_size_t is an unsigned integer type that can contain a pointer. + + gzfwrite() returns the number of full items written of size size, or zero + if there was an error. 
If the multiplication of size and nitems overflows, + i.e. the product does not fit in a z_size_t, then nothing is written, zero + is returned, and the error state is set to Z_STREAM_ERROR. + */ + + ZEXTERN int ZEXPORTVA gzprintf(gzFile file, const char* format, ...); + /* + Convert, format, compress, and write the arguments (...) to file under + control of the string format, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or a negative zlib error code in case + of error. The number of uncompressed bytes written is limited to 8191, or + one less than the buffer size given to gzbuffer(). The caller should assure + that this limit is not exceeded. If it is exceeded, then gzprintf() will + return an error (0) with nothing written. In this case, there may also be a + buffer overflow with unpredictable consequences, which is possible only if + zlib was compiled with the insecure functions sprintf() or vsprintf(), + because the secure snprintf() or vsnprintf() functions were not available. + This can be determined using zlibCompileFlags(). + */ + + ZEXTERN int ZEXPORT gzputs(gzFile file, const char* s); + /* + Compress and write the given null-terminated string s to file, excluding + the terminating null character. + + gzputs returns the number of characters written, or -1 in case of error. + */ + + ZEXTERN char* ZEXPORT gzgets(gzFile file, char* buf, int len); + /* + Read and decompress bytes from file into buf, until len-1 characters are + read, or until a newline character is read and transferred to buf, or an + end-of-file condition is encountered. If any characters are read or if len + is one, the string is terminated with a null character. If no characters + are read due to an end-of-file or len is less than one, then the buffer is + left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. 
+ */ + + ZEXTERN int ZEXPORT gzputc(gzFile file, int c); + /* + Compress and write c, converted to an unsigned char, into file. gzputc + returns the value that was written, or -1 in case of error. + */ + + ZEXTERN int ZEXPORT gzgetc(gzFile file); + /* + Read and decompress one byte from file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. + */ + + ZEXTERN int ZEXPORT gzungetc(int c, gzFile file); + /* + Push c back onto the stream for file to be read as the first character on + the next read. At least one character of push-back is always allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). + */ + + ZEXTERN int ZEXPORT gzflush(gzFile file, int flush); + /* + Flush all pending output to file. The parameter flush is as in the + deflate() function. The return value is the zlib error number (see function + gzerror below). gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. gzread() is able to read such + concatenated gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. 
+ */ + + /* + ZEXTERN z_off_t ZEXPORT gzseek(gzFile file, + z_off_t offset, int whence); + + Set the starting position to offset relative to whence for the next gzread + or gzwrite on file. The offset represents a number of bytes in the + uncompressed data stream. The whence parameter is defined as in lseek(2); + the value SEEK_END is not supported. + + If the file is opened for reading, this function is emulated but can be + extremely slow. If the file is opened for writing, only forward seeks are + supported; gzseek then compresses a sequence of zeroes up to the new + starting position. + + gzseek returns the resulting offset location as measured in bytes from + the beginning of the uncompressed stream, or -1 in case of error, in + particular if the file is opened for writing and the new starting position + would be before the current position. + */ + + ZEXTERN int ZEXPORT gzrewind(gzFile file); + /* + Rewind file. This function is supported only for reading. + + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET). + */ + + /* + ZEXTERN z_off_t ZEXPORT gztell(gzFile file); + + Return the starting position for the next gzread or gzwrite on file. + This position represents a number of bytes in the uncompressed data stream, + and is zero when starting, even if appending or reading a gzip stream from + the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) + */ + + /* + ZEXTERN z_off_t ZEXPORT gzoffset(gzFile file); + + Return the current compressed (actual) read or write offset of file. This + offset includes the count of bytes that precede the gzip stream, for example + when appending or when using gzdopen() for reading. When reading, the + offset does not include as yet unused buffered input. This information can + be used for a progress indicator. On error, gzoffset() returns -1. 
+ */ + + ZEXTERN int ZEXPORT gzeof(gzFile file); + /* + Return true (1) if the end-of-file indicator for file has been set while + reading, false (0) otherwise. Note that the end-of-file indicator is set + only if the read tried to go past the end of the input, but came up short. + Therefore, just like feof(), gzeof() may return false even if there is no + more data to read, in the event that the last read request was for the exact + number of bytes remaining in the input file. This will happen if the input + file size is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. + */ + + ZEXTERN int ZEXPORT gzdirect(gzFile file); + /* + Return true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). + + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: + gzdirect() is not needed when writing. Transparent writing must be + explicitly requested, so the application already knows the answer. When + linking statically, using gzdirect() will include all of the zlib code for + gzip file reading and decompression, which may not be desired.) + */ + + ZEXTERN int ZEXPORT gzclose(gzFile file); + /* + Flush all pending output for file, if necessary, close file and + deallocate the (de)compression state. 
Note that once file is closed, you + cannot call gzerror with file, since its structures have been deallocated. + gzclose must not be called more than once on the same file, just as free + must not be called more than once on the same allocation. + + gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a + file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the + last read ended in the middle of a gzip stream, or Z_OK on success. + */ + + ZEXTERN int ZEXPORT gzclose_r(gzFile file); + ZEXTERN int ZEXPORT gzclose_w(gzFile file); + /* + Same as gzclose(), but gzclose_r() is only for use when reading, and + gzclose_w() is only for use when writing or appending. The advantage to + using these instead of gzclose() is that they avoid linking in zlib + compression or decompression code that is not used when only reading or only + writing respectively. If gzclose() is used, then both compression and + decompression code will be included in the application when linking to a static + zlib library. + */ + + ZEXTERN const char* ZEXPORT gzerror(gzFile file, int* errnum); + /* + Return the error message for the last error which occurred on file. + errnum is set to zlib error number. If an error occurred in the file system + and not in the compression library, errnum is set to Z_ERRNO and the + application may consult errno to get the exact error code. + + The application must not modify the returned string. Future calls to + this function may invalidate the previously returned string. If file is + closed, then the string previously returned by gzerror will no longer be + available. + + gzerror() should be used to distinguish errors from end-of-file for those + functions above that do not distinguish those cases in their return values. + */ + + ZEXTERN void ZEXPORT gzclearerr(gzFile file); + /* + Clear the error and end-of-file flags for file. This is analogous to the + clearerr() function in stdio. 
This is useful for continuing to read a gzip + file that is being written concurrently. + */ + +#endif /* !Z_SOLO */ + + /* checksum functions */ + + /* + These functions are not related to compression but are exported + anyway because they might be useful in applications using the compression + library. + */ + + ZEXTERN uLong ZEXPORT adler32(uLong adler, const Bytef* buf, uInt len); + /* + Update a running Adler-32 checksum with the bytes buf[0..len-1] and + return the updated checksum. An Adler-32 value is in the range of a 32-bit + unsigned integer. If buf is Z_NULL, this function returns the required + initial value for the checksum. + + An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed + much faster. + + Usage example: + + uLong adler = adler32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + adler = adler32(adler, buffer, length); + } + if (adler != original_adler) error(); + */ + + ZEXTERN uLong ZEXPORT adler32_z(uLong adler, const Bytef* buf, z_size_t len); + /* + Same as adler32(), but with a size_t length. + */ + + /* + ZEXTERN uLong ZEXPORT adler32_combine(uLong adler1, uLong adler2, + z_off_t len2); + + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 + and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for + each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. + */ + + ZEXTERN uLong ZEXPORT crc32(uLong crc, const Bytef* buf, uInt len); + /* + Update a running CRC-32 with the bytes buf[0..len-1] and return the + updated CRC-32. A CRC-32 value is in the range of a 32-bit unsigned integer. + If buf is Z_NULL, this function returns the required initial value for the + crc. 
Pre- and post-conditioning (one's complement) is performed within this + function so it shouldn't be done by the application. + + Usage example: + + uLong crc = crc32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + crc = crc32(crc, buffer, length); + } + if (crc != original_crc) error(); + */ + + ZEXTERN uLong ZEXPORT crc32_z(uLong crc, const Bytef* buf, z_size_t len); + /* + Same as crc32(), but with a size_t length. + */ + + /* + ZEXTERN uLong ZEXPORT crc32_combine(uLong crc1, uLong crc2, z_off_t len2); + + Combine two CRC-32 check values into one. For two sequences of bytes, + seq1 and seq2 with lengths len1 and len2, CRC-32 check values were + calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 + check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and + len2. len2 must be non-negative. + */ + + /* + ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t len2); + + Return the operator corresponding to length len2, to be used with + crc32_combine_op(). len2 must be non-negative. + */ + + ZEXTERN uLong ZEXPORT crc32_combine_op(uLong crc1, uLong crc2, uLong op); + /* + Give the same result as crc32_combine(), using op in place of len2. op is + generated from len2 by crc32_combine_gen(). This will be faster than + crc32_combine() if the generated op is used more than once. 
+ */ + + /* various hacks, don't look :) */ + + /* deflateInit and inflateInit are macros to allow checking the zlib version + * and the compiler's view of z_stream: + */ + ZEXTERN int ZEXPORT deflateInit_(z_streamp strm, int level, const char* version, int stream_size); + ZEXTERN int ZEXPORT inflateInit_(z_streamp strm, const char* version, int stream_size); + ZEXTERN int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy, const char* version, int stream_size); + ZEXTERN int ZEXPORT inflateInit2_(z_streamp strm, int windowBits, const char* version, int stream_size); + ZEXTERN int ZEXPORT inflateBackInit_(z_streamp strm, int windowBits, unsigned char FAR* window, const char* version, int stream_size); +#ifdef Z_PREFIX_SET +#define z_deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +#define z_inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +#define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm), (level), (method), (windowBits), (memLevel), (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +#define z_inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, (int)sizeof(z_stream)) +#define z_inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), ZLIB_VERSION, (int)sizeof(z_stream)) +#else +#define deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +#define inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm), (level), (method), (windowBits), (memLevel), (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +#define inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, (int)sizeof(z_stream)) +#define inflateBackInit(strm, windowBits, 
window) \ + inflateBackInit_((strm), (windowBits), (window), ZLIB_VERSION, (int)sizeof(z_stream)) +#endif + +#ifndef Z_SOLO + + /* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ + struct gzFile_s + { + unsigned have; + unsigned char* next; + z_off64_t pos; + }; + ZEXTERN int ZEXPORT gzgetc_(gzFile file); /* backward compatibility */ +#ifdef Z_PREFIX_SET +#undef z_gzgetc +#define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#else +#define gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#endif + +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64(const char*, const char*); + ZEXTERN z_off64_t ZEXPORT gzseek64(gzFile, z_off64_t, int); + ZEXTERN z_off64_t ZEXPORT gztell64(gzFile); + ZEXTERN z_off64_t ZEXPORT gzoffset64(gzFile); + ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off64_t); + ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off64_t); + ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off64_t); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +#ifdef Z_PREFIX_SET +#define z_gzopen z_gzopen64 +#define z_gzseek z_gzseek64 +#define z_gztell z_gztell64 +#define z_gzoffset z_gzoffset64 +#define 
z_adler32_combine z_adler32_combine64 +#define z_crc32_combine z_crc32_combine64 +#define z_crc32_combine_gen z_crc32_combine_gen64 +#else +#define gzopen gzopen64 +#define gzseek gzseek64 +#define gztell gztell64 +#define gzoffset gzoffset64 +#define adler32_combine adler32_combine64 +#define crc32_combine crc32_combine64 +#define crc32_combine_gen crc32_combine_gen64 +#endif +#ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64(const char*, const char*); + ZEXTERN z_off_t ZEXPORT gzseek64(gzFile, z_off_t, int); + ZEXTERN z_off_t ZEXPORT gztell64(gzFile); + ZEXTERN z_off_t ZEXPORT gzoffset64(gzFile); + ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off_t); +#endif +#else + ZEXTERN gzFile ZEXPORT gzopen(const char*, const char*); + ZEXTERN z_off_t ZEXPORT gzseek(gzFile, z_off_t, int); + ZEXTERN z_off_t ZEXPORT gztell(gzFile); + ZEXTERN z_off_t ZEXPORT gzoffset(gzFile); + ZEXTERN uLong ZEXPORT adler32_combine(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine(uLong, uLong, z_off_t); + ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t); +#endif + +#else /* Z_SOLO */ + +ZEXTERN uLong ZEXPORT adler32_combine(uLong, uLong, z_off_t); +ZEXTERN uLong ZEXPORT crc32_combine(uLong, uLong, z_off_t); +ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t); + +#endif /* !Z_SOLO */ + + /* undocumented functions */ + ZEXTERN const char* ZEXPORT zError(int); + ZEXTERN int ZEXPORT inflateSyncPoint(z_streamp); + ZEXTERN const z_crc_t FAR* ZEXPORT get_crc_table(void); + ZEXTERN int ZEXPORT inflateUndermine(z_streamp, int); + ZEXTERN int ZEXPORT inflateValidate(z_streamp, int); + ZEXTERN unsigned long ZEXPORT inflateCodesUsed(z_streamp); + ZEXTERN int ZEXPORT inflateResetKeep(z_streamp); + ZEXTERN int ZEXPORT deflateResetKeep(z_streamp); +#if defined(_WIN32) && !defined(Z_SOLO) + ZEXTERN gzFile ZEXPORT gzopen_w(const wchar_t* path, const char* mode); +#endif 
+#if defined(STDC) || defined(Z_HAVE_STDARG_H) +#ifndef Z_SOLO + ZEXTERN int ZEXPORTVA gzvprintf(gzFile file, const char* format, va_list va); +#endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ZLIB_H */ diff --git a/CAPI/cpp/proto/Message2Clients.pb.cc b/CAPI/cpp/proto/Message2Clients.pb.cc index da1b0063..f4bbb2b1 100644 --- a/CAPI/cpp/proto/Message2Clients.pb.cc +++ b/CAPI/cpp/proto/Message2Clients.pb.cc @@ -4,82 +4,29 @@ #include "Message2Clients.pb.h" #include -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/extension_set.h" -#include "google/protobuf/wire_format_lite.h" -#include "google/protobuf/descriptor.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/reflection_ops.h" -#include "google/protobuf/wire_format.h" + +#include +#include +#include +#include +#include +#include +#include // @@protoc_insertion_point(includes) +#include -// Must be included last. -#include "google/protobuf/port_def.inc" PROTOBUF_PRAGMA_INIT_SEG + namespace _pb = ::PROTOBUF_NAMESPACE_ID; -namespace _pbi = ::PROTOBUF_NAMESPACE_ID::internal; +namespace _pbi = _pb::internal; + namespace protobuf { - template PROTOBUF_CONSTEXPR MessageOfShip::MessageOfShip( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.speed_)*/ 0 - - , - /*decltype(_impl_.hp_)*/ 0 - - , - /*decltype(_impl_.armor_)*/ 0 - - , - /*decltype(_impl_.shield_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.guid_)*/ ::int64_t{0} - - , - /*decltype(_impl_.ship_state_)*/ 0 - - , - /*decltype(_impl_.ship_type_)*/ 0 - - , - /*decltype(_impl_.view_range_)*/ 0 - - , - /*decltype(_impl_.producer_type_)*/ 0 - - , - /*decltype(_impl_.constructor_type_)*/ 0 - - , - /*decltype(_impl_.armor_type_)*/ 0 - - , - /*decltype(_impl_.shield_type_)*/ 0 - - , - /*decltype(_impl_.weapon_type_)*/ 0 - - , - 
/*decltype(_impl_.facing_direction_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.speed_)*/ 0, /*decltype(_impl_.hp_)*/ 0, /*decltype(_impl_.armor_)*/ 0, /*decltype(_impl_.shield_)*/ 0, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.guid_)*/ int64_t{0}, /*decltype(_impl_.ship_state_)*/ 0, /*decltype(_impl_.ship_type_)*/ 0, /*decltype(_impl_.view_range_)*/ 0, /*decltype(_impl_.producer_type_)*/ 0, /*decltype(_impl_.constructor_type_)*/ 0, /*decltype(_impl_.armor_type_)*/ 0, /*decltype(_impl_.shield_type_)*/ 0, /*decltype(_impl_.weapon_type_)*/ 0, /*decltype(_impl_.facing_direction_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfShipDefaultTypeInternal @@ -96,42 +43,12 @@ namespace protobuf MessageOfShip _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfShipDefaultTypeInternal _MessageOfShip_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfShipDefaultTypeInternal _MessageOfShip_default_instance_; PROTOBUF_CONSTEXPR MessageOfBullet::MessageOfBullet( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.type_)*/ 0 - - , - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.facing_direction_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.damage_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.guid_)*/ ::int64_t{0} - - , - /*decltype(_impl_.bomb_range_)*/ 0 - - , - /*decltype(_impl_.speed_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.type_)*/ 0, /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.facing_direction_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.damage_)*/ 0, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.guid_)*/ int64_t{0}, /*decltype(_impl_.bomb_range_)*/ 0, 
/*decltype(_impl_.speed_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfBulletDefaultTypeInternal @@ -148,33 +65,12 @@ namespace protobuf MessageOfBullet _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfBulletDefaultTypeInternal _MessageOfBullet_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfBulletDefaultTypeInternal _MessageOfBullet_default_instance_; PROTOBUF_CONSTEXPR MessageOfBombedBullet::MessageOfBombedBullet( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.type_)*/ 0 - - , - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.facing_direction_)*/ 0 - - , - /*decltype(_impl_.mapping_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.bomb_range_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.type_)*/ 0, /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.facing_direction_)*/ 0, /*decltype(_impl_.mapping_id_)*/ int64_t{0}, /*decltype(_impl_.bomb_range_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfBombedBulletDefaultTypeInternal @@ -191,27 +87,12 @@ namespace protobuf MessageOfBombedBullet _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfBombedBulletDefaultTypeInternal _MessageOfBombedBullet_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfBombedBulletDefaultTypeInternal _MessageOfBombedBullet_default_instance_; PROTOBUF_CONSTEXPR MessageOfFactory::MessageOfFactory( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.hp_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, 
/*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.hp_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfFactoryDefaultTypeInternal @@ -228,27 +109,12 @@ namespace protobuf MessageOfFactory _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfFactoryDefaultTypeInternal _MessageOfFactory_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfFactoryDefaultTypeInternal _MessageOfFactory_default_instance_; PROTOBUF_CONSTEXPR MessageOfCommunity::MessageOfCommunity( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.hp_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.hp_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfCommunityDefaultTypeInternal @@ -265,27 +131,12 @@ namespace protobuf MessageOfCommunity _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfCommunityDefaultTypeInternal _MessageOfCommunity_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfCommunityDefaultTypeInternal _MessageOfCommunity_default_instance_; PROTOBUF_CONSTEXPR MessageOfFort::MessageOfFort( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.hp_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.hp_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfFortDefaultTypeInternal @@ -302,24 +153,12 
@@ namespace protobuf MessageOfFort _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfFortDefaultTypeInternal _MessageOfFort_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfFortDefaultTypeInternal _MessageOfFort_default_instance_; PROTOBUF_CONSTEXPR MessageOfWormhole::MessageOfWormhole( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.hp_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.hp_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfWormholeDefaultTypeInternal @@ -336,24 +175,12 @@ namespace protobuf MessageOfWormhole _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfWormholeDefaultTypeInternal _MessageOfWormhole_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfWormholeDefaultTypeInternal _MessageOfWormhole_default_instance_; PROTOBUF_CONSTEXPR MessageOfResource::MessageOfResource( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.progress_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.progress_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfResourceDefaultTypeInternal @@ -370,27 +197,12 @@ namespace protobuf MessageOfResource _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfResourceDefaultTypeInternal _MessageOfResource_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfResourceDefaultTypeInternal 
_MessageOfResource_default_instance_; PROTOBUF_CONSTEXPR MessageOfHome::MessageOfHome( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.hp_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.hp_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfHomeDefaultTypeInternal @@ -407,18 +219,12 @@ namespace protobuf MessageOfHome _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfHomeDefaultTypeInternal _MessageOfHome_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfHomeDefaultTypeInternal _MessageOfHome_default_instance_; PROTOBUF_CONSTEXPR MessageOfMap_Row::MessageOfMap_Row( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.cols_)*/ {}, /*decltype(_impl_._cols_cached_byte_size_)*/ {0} - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.cols_)*/ {}, /*decltype(_impl_._cols_cached_byte_size_)*/ {0}, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfMap_RowDefaultTypeInternal @@ -435,21 +241,12 @@ namespace protobuf MessageOfMap_Row _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfMap_RowDefaultTypeInternal _MessageOfMap_Row_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfMap_RowDefaultTypeInternal _MessageOfMap_Row_default_instance_; PROTOBUF_CONSTEXPR MessageOfMap::MessageOfMap( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.rows_)*/ {}, /*decltype(_impl_.height_)*/ 0u - - , - /*decltype(_impl_.width_)*/ 0u - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.rows_)*/ {}, 
/*decltype(_impl_.height_)*/ 0u, /*decltype(_impl_.width_)*/ 0u, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfMapDefaultTypeInternal @@ -466,30 +263,12 @@ namespace protobuf MessageOfMap _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfMapDefaultTypeInternal _MessageOfMap_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfMapDefaultTypeInternal _MessageOfMap_default_instance_; PROTOBUF_CONSTEXPR MessageOfTeam::MessageOfTeam( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.score_)*/ 0 - - , - /*decltype(_impl_.money_)*/ 0 - - , - /*decltype(_impl_.guid_)*/ ::int64_t{0} - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.score_)*/ 0, /*decltype(_impl_.money_)*/ 0, /*decltype(_impl_.guid_)*/ int64_t{0}, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfTeamDefaultTypeInternal @@ -506,10 +285,7 @@ namespace protobuf MessageOfTeam _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfTeamDefaultTypeInternal _MessageOfTeam_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfTeamDefaultTypeInternal _MessageOfTeam_default_instance_; PROTOBUF_CONSTEXPR MessageOfObj::MessageOfObj( ::_pbi::ConstantInitialized ) : @@ -531,24 +307,12 @@ namespace protobuf MessageOfObj _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfObjDefaultTypeInternal _MessageOfObj_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfObjDefaultTypeInternal 
_MessageOfObj_default_instance_; PROTOBUF_CONSTEXPR MessageOfAll::MessageOfAll( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.game_time_)*/ 0 - - , - /*decltype(_impl_.red_team_score_)*/ 0 - - , - /*decltype(_impl_.blue_team_score_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.game_time_)*/ 0, /*decltype(_impl_.red_team_score_)*/ 0, /*decltype(_impl_.blue_team_score_)*/ 0, /*decltype(_impl_.red_team_money_)*/ 0, /*decltype(_impl_.blue_team_money_)*/ 0, /*decltype(_impl_.red_home_hp_)*/ 0, /*decltype(_impl_.blue_home_hp_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageOfAllDefaultTypeInternal @@ -565,15 +329,12 @@ namespace protobuf MessageOfAll _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfAllDefaultTypeInternal _MessageOfAll_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfAllDefaultTypeInternal _MessageOfAll_default_instance_; PROTOBUF_CONSTEXPR MessageToClient::MessageToClient( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_._has_bits_)*/ {}, /*decltype(_impl_._cached_size_)*/ {}, /*decltype(_impl_.obj_message_)*/ {}, /*decltype(_impl_.all_message_)*/ nullptr, /*decltype(_impl_.game_state_)*/ 0} + /*decltype(_impl_.obj_message_)*/ {}, /*decltype(_impl_.all_message_)*/ nullptr, /*decltype(_impl_.game_state_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct MessageToClientDefaultTypeInternal @@ -590,24 +351,12 @@ namespace protobuf MessageToClient _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageToClientDefaultTypeInternal _MessageToClient_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageToClientDefaultTypeInternal _MessageToClient_default_instance_; PROTOBUF_CONSTEXPR MoveRes::MoveRes( ::_pbi::ConstantInitialized ) 
: _impl_{ - /*decltype(_impl_.actual_speed_)*/ ::int64_t{0} - - , - /*decltype(_impl_.actual_angle_)*/ 0 - - , - /*decltype(_impl_.act_success_)*/ false - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.actual_speed_)*/ int64_t{0}, /*decltype(_impl_.actual_angle_)*/ 0, /*decltype(_impl_.act_success_)*/ false, /*decltype(_impl_._cached_size_)*/ {}} { } struct MoveResDefaultTypeInternal @@ -624,18 +373,12 @@ namespace protobuf MoveRes _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MoveResDefaultTypeInternal _MoveRes_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MoveResDefaultTypeInternal _MoveRes_default_instance_; PROTOBUF_CONSTEXPR BoolRes::BoolRes( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.act_success_)*/ false - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.act_success_)*/ false, /*decltype(_impl_._cached_size_)*/ {}} { } struct BoolResDefaultTypeInternal @@ -652,10 +395,7 @@ namespace protobuf BoolRes _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 BoolResDefaultTypeInternal _BoolRes_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 BoolResDefaultTypeInternal _BoolRes_default_instance_; PROTOBUF_CONSTEXPR ShipInfoRes::ShipInfoRes( ::_pbi::ConstantInitialized ) : @@ -677,18 +417,12 @@ namespace protobuf ShipInfoRes _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ShipInfoResDefaultTypeInternal _ShipInfoRes_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ShipInfoResDefaultTypeInternal _ShipInfoRes_default_instance_; PROTOBUF_CONSTEXPR EcoRes::EcoRes( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.economy_)*/ ::int64_t{0} - - , - 
/*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.economy_)*/ int64_t{0}, /*decltype(_impl_._cached_size_)*/ {}} { } struct EcoResDefaultTypeInternal @@ -705,23 +439,12 @@ namespace protobuf EcoRes _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 EcoResDefaultTypeInternal _EcoRes_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 EcoResDefaultTypeInternal _EcoRes_default_instance_; PROTOBUF_CONSTEXPR MessageOfNews::MessageOfNews( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.from_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.to_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.news_)*/ {}, - /*decltype(_impl_._cached_size_)*/ {}, - /*decltype(_impl_._oneof_case_)*/ {}} + /*decltype(_impl_.from_id_)*/ int64_t{0}, /*decltype(_impl_.to_id_)*/ int64_t{0}, /*decltype(_impl_.news_)*/ {}, /*decltype(_impl_._cached_size_)*/ {}, /*decltype(_impl_._oneof_case_)*/ {}} { } struct MessageOfNewsDefaultTypeInternal @@ -738,26 +461,19 @@ namespace protobuf MessageOfNews _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfNewsDefaultTypeInternal _MessageOfNews_default_instance_; + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MessageOfNewsDefaultTypeInternal _MessageOfNews_default_instance_; } // namespace protobuf static ::_pb::Metadata file_level_metadata_Message2Clients_2eproto[20]; -static constexpr const ::_pb::EnumDescriptor** - file_level_enum_descriptors_Message2Clients_2eproto = nullptr; -static constexpr const ::_pb::ServiceDescriptor** - file_level_service_descriptors_Message2Clients_2eproto = nullptr; -const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE( - protodesc_cold -) = { +static constexpr ::_pb::EnumDescriptor const** file_level_enum_descriptors_Message2Clients_2eproto = nullptr; +static 
constexpr ::_pb::ServiceDescriptor const** file_level_service_descriptors_Message2Clients_2eproto = nullptr; + +const uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfShip, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfShip, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfShip, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfShip, _impl_.speed_), @@ -782,8 +498,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfBullet, _impl_.type_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfBullet, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfBullet, _impl_.y_), @@ -799,8 +513,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfBombedBullet, _impl_.type_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfBombedBullet, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfBombedBullet, _impl_.y_), @@ -813,8 +525,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfFactory, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfFactory, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfFactory, _impl_.hp_), @@ -825,8 +535,6 @@ const 
::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfCommunity, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfCommunity, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfCommunity, _impl_.hp_), @@ -837,8 +545,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfFort, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfFort, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfFort, _impl_.hp_), @@ -849,8 +555,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfWormhole, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfWormhole, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfWormhole, _impl_.hp_), @@ -860,8 +564,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfResource, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfResource, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfResource, _impl_.progress_), @@ -871,8 +573,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfHome, 
_impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfHome, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfHome, _impl_.hp_), @@ -883,8 +583,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfMap_Row, _impl_.cols_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfMap, _internal_metadata_), @@ -892,8 +590,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfMap, _impl_.height_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfMap, _impl_.width_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfMap, _impl_.rows_), @@ -903,8 +599,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfTeam, _impl_.team_id_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfTeam, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfTeam, _impl_.score_), @@ -916,8 +610,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfObj, _impl_._oneof_case_[0]), ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) ::_pbi::kInvalidFieldOffsetTag, ::_pbi::kInvalidFieldOffsetTag, ::_pbi::kInvalidFieldOffsetTag, @@ -937,33 +629,28 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, 
// no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfAll, _impl_.game_time_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfAll, _impl_.red_team_score_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfAll, _impl_.blue_team_score_), - PROTOBUF_FIELD_OFFSET(::protobuf::MessageToClient, _impl_._has_bits_), + PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfAll, _impl_.red_team_money_), + PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfAll, _impl_.blue_team_money_), + PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfAll, _impl_.red_home_hp_), + PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfAll, _impl_.blue_home_hp_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::MessageToClient, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MessageToClient, _impl_.obj_message_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageToClient, _impl_.game_state_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageToClient, _impl_.all_message_), - ~0u, - ~0u, - 0, ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::MoveRes, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MoveRes, _impl_.actual_speed_), PROTOBUF_FIELD_OFFSET(::protobuf::MoveRes, _impl_.actual_angle_), PROTOBUF_FIELD_OFFSET(::protobuf::MoveRes, _impl_.act_success_), @@ -973,8 +660,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::BoolRes, _impl_.act_success_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::ShipInfoRes, _internal_metadata_), @@ -982,8 +667,6 @@ const ::uint32_t 
TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::ShipInfoRes, _impl_.ship_info_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::EcoRes, _internal_metadata_), @@ -991,8 +674,6 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::EcoRes, _impl_.economy_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfNews, _internal_metadata_), @@ -1000,37 +681,33 @@ const ::uint32_t TableStruct_Message2Clients_2eproto::offsets[] PROTOBUF_SECTION PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfNews, _impl_._oneof_case_[0]), ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) ::_pbi::kInvalidFieldOffsetTag, ::_pbi::kInvalidFieldOffsetTag, PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfNews, _impl_.from_id_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfNews, _impl_.to_id_), PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfNews, _impl_.news_), }; - -static const ::_pbi::MigrationSchema - schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { - {0, -1, -1, sizeof(::protobuf::MessageOfShip)}, - {26, -1, -1, sizeof(::protobuf::MessageOfBullet)}, - {43, -1, -1, sizeof(::protobuf::MessageOfBombedBullet)}, - {57, -1, -1, sizeof(::protobuf::MessageOfFactory)}, - {69, -1, -1, sizeof(::protobuf::MessageOfCommunity)}, - {81, -1, -1, sizeof(::protobuf::MessageOfFort)}, - {93, -1, -1, sizeof(::protobuf::MessageOfWormhole)}, - {104, -1, -1, sizeof(::protobuf::MessageOfResource)}, - {115, -1, -1, sizeof(::protobuf::MessageOfHome)}, - {127, -1, -1, sizeof(::protobuf::MessageOfMap_Row)}, - {136, -1, -1, sizeof(::protobuf::MessageOfMap)}, - {147, -1, 
-1, sizeof(::protobuf::MessageOfTeam)}, - {160, -1, -1, sizeof(::protobuf::MessageOfObj)}, - {181, -1, -1, sizeof(::protobuf::MessageOfAll)}, - {192, 203, -1, sizeof(::protobuf::MessageToClient)}, - {206, -1, -1, sizeof(::protobuf::MoveRes)}, - {217, -1, -1, sizeof(::protobuf::BoolRes)}, - {226, -1, -1, sizeof(::protobuf::ShipInfoRes)}, - {235, -1, -1, sizeof(::protobuf::EcoRes)}, - {244, -1, -1, sizeof(::protobuf::MessageOfNews)}, +static const ::_pbi::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + {0, -1, -1, sizeof(::protobuf::MessageOfShip)}, + {24, -1, -1, sizeof(::protobuf::MessageOfBullet)}, + {39, -1, -1, sizeof(::protobuf::MessageOfBombedBullet)}, + {51, -1, -1, sizeof(::protobuf::MessageOfFactory)}, + {61, -1, -1, sizeof(::protobuf::MessageOfCommunity)}, + {71, -1, -1, sizeof(::protobuf::MessageOfFort)}, + {81, -1, -1, sizeof(::protobuf::MessageOfWormhole)}, + {90, -1, -1, sizeof(::protobuf::MessageOfResource)}, + {99, -1, -1, sizeof(::protobuf::MessageOfHome)}, + {109, -1, -1, sizeof(::protobuf::MessageOfMap_Row)}, + {116, -1, -1, sizeof(::protobuf::MessageOfMap)}, + {125, -1, -1, sizeof(::protobuf::MessageOfTeam)}, + {136, -1, -1, sizeof(::protobuf::MessageOfObj)}, + {155, -1, -1, sizeof(::protobuf::MessageOfAll)}, + {168, -1, -1, sizeof(::protobuf::MessageToClient)}, + {177, -1, -1, sizeof(::protobuf::MoveRes)}, + {186, -1, -1, sizeof(::protobuf::BoolRes)}, + {193, -1, -1, sizeof(::protobuf::ShipInfoRes)}, + {200, -1, -1, sizeof(::protobuf::EcoRes)}, + {207, -1, -1, sizeof(::protobuf::MessageOfNews)}, }; static const ::_pb::Message* const file_default_instances[] = { @@ -1055,7 +732,8 @@ static const ::_pb::Message* const file_default_instances[] = { &::protobuf::_EcoRes_default_instance_._instance, &::protobuf::_MessageOfNews_default_instance_._instance, }; -const char descriptor_table_protodef_Message2Clients_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + +const char 
descriptor_table_protodef_Message2Clients_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\025Message2Clients.proto\022\010protobuf\032\021Messa" "geType.proto\"\362\003\n\rMessageOfShip\022\t\n\001x\030\001 \001(" "\005\022\t\n\001y\030\002 \001(\005\022\r\n\005speed\030\003 \001(\005\022\n\n\002hp\030\004 \001(\005\022" @@ -1110,30 +788,31 @@ const char descriptor_table_protodef_Message2Clients_2eproto[] PROTOBUF_SECTION_ "MessageOfNewsH\000\022@\n\025bombed_bullet_message" "\030\013 \001(\0132\037.protobuf.MessageOfBombedBulletH" "\000\022/\n\014team_message\030\014 \001(\0132\027.protobuf.Messa" - "geOfTeamH\000B\020\n\016message_of_obj\"R\n\014MessageO" - "fAll\022\021\n\tgame_time\030\001 \001(\005\022\026\n\016red_team_scor" - "e\030\002 \001(\005\022\027\n\017blue_team_score\030\003 \001(\005\"\224\001\n\017Mes" - "sageToClient\022+\n\013obj_message\030\001 \003(\0132\026.prot" - "obuf.MessageOfObj\022\'\n\ngame_state\030\002 \001(\0162\023." - "protobuf.GameState\022+\n\013all_message\030\003 \001(\0132" - "\026.protobuf.MessageOfAll\"J\n\007MoveRes\022\024\n\014ac" - "tual_speed\030\001 \001(\003\022\024\n\014actual_angle\030\002 \001(\001\022\023" - "\n\013act_success\030\003 \001(\010\"\036\n\007BoolRes\022\023\n\013act_su" - "ccess\030\001 \001(\010\"9\n\013ShipInfoRes\022*\n\tship_info\030" - "\001 \003(\0132\027.protobuf.MessageOfShip\"\031\n\006EcoRes" - "\022\017\n\007economy\030\001 \001(\003\"i\n\rMessageOfNews\022\026\n\014te" - "xt_message\030\001 \001(\tH\000\022\030\n\016binary_message\030\004 \001" - "(\014H\000\022\017\n\007from_id\030\002 \001(\003\022\r\n\005to_id\030\003 \001(\003B\006\n\004" - "newsb\006proto3"}; -static const ::_pbi::DescriptorTable* const descriptor_table_Message2Clients_2eproto_deps[1] = - { - &::descriptor_table_MessageType_2eproto, + "geOfTeamH\000B\020\n\016message_of_obj\"\256\001\n\014Message" + "OfAll\022\021\n\tgame_time\030\001 \001(\005\022\026\n\016red_team_sco" + "re\030\002 
\001(\005\022\027\n\017blue_team_score\030\003 \001(\005\022\026\n\016red" + "_team_money\030\004 \001(\005\022\027\n\017blue_team_money\030\005 \001" + "(\005\022\023\n\013red_home_hp\030\006 \001(\005\022\024\n\014blue_home_hp\030" + "\007 \001(\005\"\224\001\n\017MessageToClient\022+\n\013obj_message" + "\030\001 \003(\0132\026.protobuf.MessageOfObj\022\'\n\ngame_s" + "tate\030\002 \001(\0162\023.protobuf.GameState\022+\n\013all_m" + "essage\030\003 \001(\0132\026.protobuf.MessageOfAll\"J\n\007" + "MoveRes\022\024\n\014actual_speed\030\001 \001(\003\022\024\n\014actual_" + "angle\030\002 \001(\001\022\023\n\013act_success\030\003 \001(\010\"\036\n\007Bool" + "Res\022\023\n\013act_success\030\001 \001(\010\"9\n\013ShipInfoRes\022" + "*\n\tship_info\030\001 \003(\0132\027.protobuf.MessageOfS" + "hip\"\031\n\006EcoRes\022\017\n\007economy\030\001 \001(\003\"i\n\rMessag" + "eOfNews\022\026\n\014text_message\030\001 \001(\tH\000\022\030\n\016binar" + "y_message\030\004 \001(\014H\000\022\017\n\007from_id\030\002 \001(\003\022\r\n\005to" + "_id\030\003 \001(\003B\006\n\004newsb\006proto3"; +static const ::_pbi::DescriptorTable* const descriptor_table_Message2Clients_2eproto_deps[1] = { + &::descriptor_table_MessageType_2eproto, }; -static ::absl::once_flag descriptor_table_Message2Clients_2eproto_once; +static ::_pbi::once_flag descriptor_table_Message2Clients_2eproto_once; const ::_pbi::DescriptorTable descriptor_table_Message2Clients_2eproto = { false, false, - 2732, + 2825, descriptor_table_protodef_Message2Clients_2eproto, "Message2Clients.proto", &descriptor_table_Message2Clients_2eproto_once, @@ -1147,27 +826,16 @@ const ::_pbi::DescriptorTable descriptor_table_Message2Clients_2eproto = { file_level_enum_descriptors_Message2Clients_2eproto, file_level_service_descriptors_Message2Clients_2eproto, }; - -// This function exists to be marked as weak. -// It can significantly speed up compilation by breaking up LLVM's SCC -// in the .pb.cc translation units. 
Large translation units see a -// reduction of more than 35% of walltime for optimized builds. Without -// the weak attribute all the messages in the file, including all the -// vtables and everything they use become part of the same SCC through -// a cycle like: -// GetMetadata -> descriptor table -> default instances -> -// vtables -> GetMetadata -// By adding a weak function here we break the connection from the -// individual vtables back into the descriptor table. PROTOBUF_ATTRIBUTE_WEAK const ::_pbi::DescriptorTable* descriptor_table_Message2Clients_2eproto_getter() { return &descriptor_table_Message2Clients_2eproto; } + // Force running AddDescriptors() at dynamic initialization time. -PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 -static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_Message2Clients_2eproto(&descriptor_table_Message2Clients_2eproto); +PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_Message2Clients_2eproto(&descriptor_table_Message2Clients_2eproto); namespace protobuf { + // =================================================================== class MessageOfShip::_Internal @@ -1175,81 +843,33 @@ namespace protobuf public: }; - MessageOfShip::MessageOfShip(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfShip::MessageOfShip(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfShip) } MessageOfShip::MessageOfShip(const MessageOfShip& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfShip* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, 
decltype(_impl_.speed_){}, decltype(_impl_.hp_){}, decltype(_impl_.armor_){}, decltype(_impl_.shield_){}, decltype(_impl_.team_id_){}, decltype(_impl_.player_id_){}, decltype(_impl_.guid_){}, decltype(_impl_.ship_state_){}, decltype(_impl_.ship_type_){}, decltype(_impl_.view_range_){}, decltype(_impl_.producer_type_){}, decltype(_impl_.constructor_type_){}, decltype(_impl_.armor_type_){}, decltype(_impl_.shield_type_){}, decltype(_impl_.weapon_type_){}, decltype(_impl_.facing_direction_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.facing_direction_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.facing_direction_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfShip) } - inline void MessageOfShip::SharedCtor(::_pb::Arena* arena) + inline void MessageOfShip::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.speed_){0} - - , - decltype(_impl_.hp_){0} - - , - decltype(_impl_.armor_){0} - - , - decltype(_impl_.shield_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.guid_){::int64_t{0}} - - , - decltype(_impl_.ship_state_){0} - - , - decltype(_impl_.ship_type_){0} - - , - decltype(_impl_.view_range_){0} - - , - decltype(_impl_.producer_type_){0} - - , - decltype(_impl_.constructor_type_){0} - - , - decltype(_impl_.armor_type_){0} - - , - decltype(_impl_.shield_type_){0} - - , - decltype(_impl_.weapon_type_){0} - - , - decltype(_impl_.facing_direction_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, decltype(_impl_.speed_){0}, decltype(_impl_.hp_){0}, decltype(_impl_.armor_){0}, 
decltype(_impl_.shield_){0}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.guid_){int64_t{0}}, decltype(_impl_.ship_state_){0}, decltype(_impl_.ship_type_){0}, decltype(_impl_.view_range_){0}, decltype(_impl_.producer_type_){0}, decltype(_impl_.constructor_type_){0}, decltype(_impl_.armor_type_){0}, decltype(_impl_.shield_type_){0}, decltype(_impl_.weapon_type_){0}, decltype(_impl_.facing_direction_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfShip::~MessageOfShip() @@ -1265,7 +885,7 @@ namespace protobuf inline void MessageOfShip::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfShip::SetCachedSize(int size) const @@ -1276,11 +896,11 @@ namespace protobuf void MessageOfShip::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfShip) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.facing_direction_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.facing_direction_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.facing_direction_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.facing_direction_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -1291,232 +911,196 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if 
(PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 speed = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.speed_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 hp = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 armor = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 40)) { _impl_.armor_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 shield = 6; case 6: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 48)) { _impl_.shield_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 7; case 7: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 56)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 player_id = 8; case 8: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 64)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 64)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 guid = 9; case 9: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 72)) { _impl_.guid_ = 
::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.ShipState ship_state = 10; case 10: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 80)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 80)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_ship_state(static_cast<::protobuf::ShipState>(val)); } else - { goto handle_unusual; - } continue; // .protobuf.ShipType ship_type = 11; case 11: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 88)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 88)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_ship_type(static_cast<::protobuf::ShipType>(val)); } else - { goto handle_unusual; - } continue; // int32 view_range = 12; case 12: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 96)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 96)) { _impl_.view_range_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.ProducerType producer_type = 13; case 13: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 104)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 104)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_producer_type(static_cast<::protobuf::ProducerType>(val)); } else - { goto handle_unusual; - } continue; // .protobuf.ConstructorType constructor_type = 14; case 14: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 112)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 112)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = 
::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_constructor_type(static_cast<::protobuf::ConstructorType>(val)); } else - { goto handle_unusual; - } continue; // .protobuf.ArmorType armor_type = 15; case 15: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 120)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 120)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_armor_type(static_cast<::protobuf::ArmorType>(val)); } else - { goto handle_unusual; - } continue; // .protobuf.ShieldType shield_type = 16; case 16: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 128)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 128)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_shield_type(static_cast<::protobuf::ShieldType>(val)); } else - { goto handle_unusual; - } continue; // .protobuf.WeaponType weapon_type = 17; case 17: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 136)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 136)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_weapon_type(static_cast<::protobuf::WeaponType>(val)); } else - { goto handle_unusual; - } continue; // double facing_direction = 18; case 18: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 145)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 145)) { _impl_.facing_direction_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -1544,93 +1128,75 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfShip::_InternalSerialize( - ::uint8_t* target, 
::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfShip::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfShip) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // int32 speed = 3; if (this->_internal_speed() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_speed(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_speed(), target); } // int32 hp = 4; if (this->_internal_hp() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 4, this->_internal_hp(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(4, this->_internal_hp(), target); } // int32 armor = 5; if (this->_internal_armor() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 5, this->_internal_armor(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(5, this->_internal_armor(), target); } // int32 shield = 6; if (this->_internal_shield() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 6, this->_internal_shield(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(6, this->_internal_shield(), 
target); } // int64 team_id = 7; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 7, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(7, this->_internal_team_id(), target); } // int64 player_id = 8; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 8, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(8, this->_internal_player_id(), target); } // int64 guid = 9; if (this->_internal_guid() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 9, this->_internal_guid(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(9, this->_internal_guid(), target); } // .protobuf.ShipState ship_state = 10; @@ -1655,9 +1221,7 @@ namespace protobuf if (this->_internal_view_range() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 12, this->_internal_view_range(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(12, this->_internal_view_range(), target); } // .protobuf.ProducerType producer_type = 13; @@ -1706,16 +1270,14 @@ namespace protobuf } // double facing_direction = 18; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = this->_internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 18, this->_internal_facing_direction(), target - ); + target = 
::_pbi::WireFormatLite::WriteDoubleToArray(18, this->_internal_facing_direction(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -1728,85 +1290,67 @@ namespace protobuf return target; } - ::size_t MessageOfShip::ByteSizeLong() const + size_t MessageOfShip::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfShip) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int32 speed = 3; if (this->_internal_speed() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_speed() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_speed()); } // int32 hp = 4; if (this->_internal_hp() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_hp() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_hp()); } // int32 armor = 5; if (this->_internal_armor() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_armor() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_armor()); } // int32 shield = 6; if (this->_internal_shield() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_shield() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_shield()); } // int64 team_id = 7; if (this->_internal_team_id() != 0) { 
- total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // int64 player_id = 8; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int64 guid = 9; if (this->_internal_guid() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_guid() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_guid()); } // .protobuf.ShipState ship_state = 10; @@ -1826,9 +1370,7 @@ namespace protobuf // int32 view_range = 12; if (this->_internal_view_range() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_view_range() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_view_range()); } // .protobuf.ProducerType producer_type = 13; @@ -1867,13 +1409,13 @@ namespace protobuf } // double facing_direction = 18; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = this->_internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { - total_size += 10; + total_size += 2 + 8; } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -1892,8 +1434,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfShip) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + 
uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) @@ -1964,9 +1506,9 @@ namespace protobuf { _this->_internal_set_weapon_type(from._internal_weapon_type()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = from._internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { @@ -2006,6 +1548,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[0] ); } + // =================================================================== class MessageOfBullet::_Internal @@ -2013,54 +1556,33 @@ namespace protobuf public: }; - MessageOfBullet::MessageOfBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfBullet::MessageOfBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfBullet) } MessageOfBullet::MessageOfBullet(const MessageOfBullet& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfBullet* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.type_){}, decltype(_impl_.x_){}, decltype(_impl_.facing_direction_){}, decltype(_impl_.y_){}, decltype(_impl_.damage_){}, decltype(_impl_.team_id_){}, decltype(_impl_.guid_){}, decltype(_impl_.bomb_range_){}, 
decltype(_impl_.speed_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.type_, &from._impl_.type_, static_cast(reinterpret_cast(&_impl_.speed_) - reinterpret_cast(&_impl_.type_)) + sizeof(_impl_.speed_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfBullet) } - inline void MessageOfBullet::SharedCtor(::_pb::Arena* arena) + inline void MessageOfBullet::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.type_){0} - - , - decltype(_impl_.x_){0} - - , - decltype(_impl_.facing_direction_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.damage_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.guid_){::int64_t{0}} - - , - decltype(_impl_.bomb_range_){0} - - , - decltype(_impl_.speed_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.type_){0}, decltype(_impl_.x_){0}, decltype(_impl_.facing_direction_){0}, decltype(_impl_.y_){0}, decltype(_impl_.damage_){0}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.guid_){int64_t{0}}, decltype(_impl_.bomb_range_){0}, decltype(_impl_.speed_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfBullet::~MessageOfBullet() @@ -2076,7 +1598,7 @@ namespace protobuf inline void MessageOfBullet::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfBullet::SetCachedSize(int size) const @@ -2087,11 +1609,11 @@ namespace protobuf void MessageOfBullet::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfBullet) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.type_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.speed_) - 
reinterpret_cast(&_impl_.type_)) + sizeof(_impl_.speed_)); + ::memset(&_impl_.type_, 0, static_cast(reinterpret_cast(&_impl_.speed_) - reinterpret_cast(&_impl_.type_)) + sizeof(_impl_.speed_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -2102,118 +1624,100 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // .protobuf.BulletType type = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_type(static_cast<::protobuf::BulletType>(val)); } else - { goto handle_unusual; - } continue; // int32 x = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // double facing_direction = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 33)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 33)) { _impl_.facing_direction_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; // int32 damage = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 40)) { _impl_.damage_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 
team_id = 6; case 6: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 48)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 48)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 guid = 7; case 7: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 56)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 56)) { _impl_.guid_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // double bomb_range = 8; case 8: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 65)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 65)) { _impl_.bomb_range_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; // int32 speed = 9; case 9: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 72)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 72)) { _impl_.speed_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -2241,12 +1745,12 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfBullet::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfBullet::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfBullet) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // .protobuf.BulletType type = 1; @@ -2262,80 +1766,64 @@ namespace protobuf if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_x(), target); } // int32 y = 3; if (this->_internal_y() 
!= 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_y(), target); } // double facing_direction = 4; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = this->_internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 4, this->_internal_facing_direction(), target - ); + target = ::_pbi::WireFormatLite::WriteDoubleToArray(4, this->_internal_facing_direction(), target); } // int32 damage = 5; if (this->_internal_damage() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 5, this->_internal_damage(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(5, this->_internal_damage(), target); } // int64 team_id = 6; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 6, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(6, this->_internal_team_id(), target); } // int64 guid = 7; if (this->_internal_guid() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 7, this->_internal_guid(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(7, this->_internal_guid(), target); } // double bomb_range = 8; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + 
static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_bomb_range = this->_internal_bomb_range(); - ::uint64_t raw_bomb_range; + uint64_t raw_bomb_range; memcpy(&raw_bomb_range, &tmp_bomb_range, sizeof(tmp_bomb_range)); if (raw_bomb_range != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 8, this->_internal_bomb_range(), target - ); + target = ::_pbi::WireFormatLite::WriteDoubleToArray(8, this->_internal_bomb_range(), target); } // int32 speed = 9; if (this->_internal_speed() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 9, this->_internal_speed(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(9, this->_internal_speed(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -2348,12 +1836,12 @@ namespace protobuf return target; } - ::size_t MessageOfBullet::ByteSizeLong() const + size_t MessageOfBullet::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfBullet) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; @@ -2367,69 +1855,57 @@ namespace protobuf // int32 x = 2; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // double facing_direction = 4; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = this->_internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; 
memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { - total_size += 9; + total_size += 1 + 8; } // int32 y = 3; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int32 damage = 5; if (this->_internal_damage() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_damage() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_damage()); } // int64 team_id = 6; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // int64 guid = 7; if (this->_internal_guid() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_guid() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_guid()); } // double bomb_range = 8; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_bomb_range = this->_internal_bomb_range(); - ::uint64_t raw_bomb_range; + uint64_t raw_bomb_range; memcpy(&raw_bomb_range, &tmp_bomb_range, sizeof(tmp_bomb_range)); if (raw_bomb_range != 0) { - total_size += 9; + total_size += 1 + 8; } // int32 speed = 9; if (this->_internal_speed() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_speed() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_speed()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -2448,8 +1924,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // 
@@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfBullet) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_type() != 0) @@ -2460,9 +1936,9 @@ namespace protobuf { _this->_internal_set_x(from._internal_x()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = from._internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { @@ -2484,9 +1960,9 @@ namespace protobuf { _this->_internal_set_guid(from._internal_guid()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_bomb_range = from._internal_bomb_range(); - ::uint64_t raw_bomb_range; + uint64_t raw_bomb_range; memcpy(&raw_bomb_range, &tmp_bomb_range, sizeof(tmp_bomb_range)); if (raw_bomb_range != 0) { @@ -2530,6 +2006,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[1] ); } + // =================================================================== class MessageOfBombedBullet::_Internal @@ -2537,45 +2014,33 @@ namespace protobuf public: }; - MessageOfBombedBullet::MessageOfBombedBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfBombedBullet::MessageOfBombedBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, 
is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfBombedBullet) } MessageOfBombedBullet::MessageOfBombedBullet(const MessageOfBombedBullet& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfBombedBullet* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.type_){}, decltype(_impl_.x_){}, decltype(_impl_.facing_direction_){}, decltype(_impl_.mapping_id_){}, decltype(_impl_.bomb_range_){}, decltype(_impl_.y_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.type_, &from._impl_.type_, static_cast(reinterpret_cast(&_impl_.y_) - reinterpret_cast(&_impl_.type_)) + sizeof(_impl_.y_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfBombedBullet) } - inline void MessageOfBombedBullet::SharedCtor(::_pb::Arena* arena) + inline void MessageOfBombedBullet::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.type_){0} - - , - decltype(_impl_.x_){0} - - , - decltype(_impl_.facing_direction_){0} - - , - decltype(_impl_.mapping_id_){::int64_t{0}} - - , - decltype(_impl_.bomb_range_){0} - - , - decltype(_impl_.y_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.type_){0}, decltype(_impl_.x_){0}, decltype(_impl_.facing_direction_){0}, decltype(_impl_.mapping_id_){int64_t{0}}, decltype(_impl_.bomb_range_){0}, decltype(_impl_.y_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfBombedBullet::~MessageOfBombedBullet() @@ -2591,7 +2056,7 @@ namespace protobuf inline void MessageOfBombedBullet::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); 
+ GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfBombedBullet::SetCachedSize(int size) const @@ -2602,11 +2067,11 @@ namespace protobuf void MessageOfBombedBullet::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfBombedBullet) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.type_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.y_) - reinterpret_cast(&_impl_.type_)) + sizeof(_impl_.y_)); + ::memset(&_impl_.type_, 0, static_cast(reinterpret_cast(&_impl_.y_) - reinterpret_cast(&_impl_.type_)) + sizeof(_impl_.y_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -2617,82 +2082,70 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // .protobuf.BulletType type = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_type(static_cast<::protobuf::BulletType>(val)); } else - { goto handle_unusual; - } continue; // int32 x = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // double facing_direction = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 33)) + 
if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 33)) { _impl_.facing_direction_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; // int64 mapping_id = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 40)) { _impl_.mapping_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // double bomb_range = 6; case 6: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 49)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 49)) { _impl_.bomb_range_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -2720,12 +2173,12 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfBombedBullet::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfBombedBullet::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfBombedBullet) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // .protobuf.BulletType type = 1; @@ -2741,53 +2194,43 @@ namespace protobuf if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_x(), target); } // int32 y = 3; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_y(), target); } // double facing_direction = 4; - static_assert(sizeof(::uint64_t) == 
sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = this->_internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 4, this->_internal_facing_direction(), target - ); + target = ::_pbi::WireFormatLite::WriteDoubleToArray(4, this->_internal_facing_direction(), target); } // int64 mapping_id = 5; if (this->_internal_mapping_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 5, this->_internal_mapping_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_mapping_id(), target); } // double bomb_range = 6; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_bomb_range = this->_internal_bomb_range(); - ::uint64_t raw_bomb_range; + uint64_t raw_bomb_range; memcpy(&raw_bomb_range, &tmp_bomb_range, sizeof(tmp_bomb_range)); if (raw_bomb_range != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 6, this->_internal_bomb_range(), target - ); + target = ::_pbi::WireFormatLite::WriteDoubleToArray(6, this->_internal_bomb_range(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -2800,12 +2243,12 @@ namespace protobuf return target; } - ::size_t MessageOfBombedBullet::ByteSizeLong() const + size_t MessageOfBombedBullet::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfBombedBullet) - 
::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; @@ -2819,45 +2262,39 @@ namespace protobuf // int32 x = 2; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // double facing_direction = 4; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = this->_internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { - total_size += 9; + total_size += 1 + 8; } // int64 mapping_id = 5; if (this->_internal_mapping_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_mapping_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_mapping_id()); } // double bomb_range = 6; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_bomb_range = this->_internal_bomb_range(); - ::uint64_t raw_bomb_range; + uint64_t raw_bomb_range; memcpy(&raw_bomb_range, &tmp_bomb_range, sizeof(tmp_bomb_range)); if (raw_bomb_range != 0) { - total_size += 9; + total_size += 1 + 8; } // int32 y = 3; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } return 
MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -2876,8 +2313,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfBombedBullet) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_type() != 0) @@ -2888,9 +2325,9 @@ namespace protobuf { _this->_internal_set_x(from._internal_x()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_facing_direction = from._internal_facing_direction(); - ::uint64_t raw_facing_direction; + uint64_t raw_facing_direction; memcpy(&raw_facing_direction, &tmp_facing_direction, sizeof(tmp_facing_direction)); if (raw_facing_direction != 0) { @@ -2900,9 +2337,9 @@ namespace protobuf { _this->_internal_set_mapping_id(from._internal_mapping_id()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_bomb_range = from._internal_bomb_range(); - ::uint64_t raw_bomb_range; + uint64_t raw_bomb_range; memcpy(&raw_bomb_range, &tmp_bomb_range, sizeof(tmp_bomb_range)); if (raw_bomb_range != 0) { @@ -2946,6 +2383,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[2] ); } + // =================================================================== class MessageOfFactory::_Internal @@ -2953,39 +2391,33 @@ namespace protobuf public: }; - MessageOfFactory::MessageOfFactory(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - 
::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfFactory::MessageOfFactory(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfFactory) } MessageOfFactory::MessageOfFactory(const MessageOfFactory& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfFactory* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, decltype(_impl_.team_id_){}, decltype(_impl_.hp_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfFactory) } - inline void MessageOfFactory::SharedCtor(::_pb::Arena* arena) + inline void MessageOfFactory::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.hp_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.hp_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfFactory::~MessageOfFactory() @@ -3001,7 +2433,7 @@ namespace protobuf inline void MessageOfFactory::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfFactory::SetCachedSize(int size) const @@ -3012,11 
+2444,11 @@ namespace protobuf void MessageOfFactory::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfFactory) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -3027,57 +2459,49 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 hp = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -3105,48 +2529,40 @@ namespace protobuf #undef CHK_ } - ::uint8_t* 
MessageOfFactory::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfFactory::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfFactory) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // int32 hp = 3; if (this->_internal_hp() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_hp(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_hp(), target); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 4, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -3159,45 +2575,37 @@ namespace protobuf return target; } - ::size_t MessageOfFactory::ByteSizeLong() const + size_t MessageOfFactory::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfFactory) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being 
unused (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // int32 hp = 3; if (this->_internal_hp() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_hp() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_hp()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -3216,8 +2624,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfFactory) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) @@ -3270,6 +2678,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[3] ); } + // =================================================================== class MessageOfCommunity::_Internal @@ -3277,39 +2686,33 @@ namespace protobuf public: }; - MessageOfCommunity::MessageOfCommunity(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfCommunity::MessageOfCommunity(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + 
::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfCommunity) } MessageOfCommunity::MessageOfCommunity(const MessageOfCommunity& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfCommunity* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, decltype(_impl_.team_id_){}, decltype(_impl_.hp_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfCommunity) } - inline void MessageOfCommunity::SharedCtor(::_pb::Arena* arena) + inline void MessageOfCommunity::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.hp_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.hp_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfCommunity::~MessageOfCommunity() @@ -3325,7 +2728,7 @@ namespace protobuf inline void MessageOfCommunity::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfCommunity::SetCachedSize(int size) const @@ -3336,11 +2739,11 @@ namespace protobuf void MessageOfCommunity::Clear() { // 
@@protoc_insertion_point(message_clear_start:protobuf.MessageOfCommunity) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -3351,57 +2754,49 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 hp = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -3429,48 +2824,40 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfCommunity::_InternalSerialize( - ::uint8_t* target, 
::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfCommunity::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfCommunity) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // int32 hp = 3; if (this->_internal_hp() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_hp(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_hp(), target); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 4, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -3483,45 +2870,37 @@ namespace protobuf return target; } - ::size_t MessageOfCommunity::ByteSizeLong() const + size_t MessageOfCommunity::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfCommunity) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 x = 1; if 
(this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // int32 hp = 3; if (this->_internal_hp() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_hp() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_hp()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -3540,8 +2919,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfCommunity) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) @@ -3594,6 +2973,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[4] ); } + // =================================================================== class MessageOfFort::_Internal @@ -3601,39 +2981,33 @@ namespace protobuf public: }; - MessageOfFort::MessageOfFort(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfFort::MessageOfFort(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, 
is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfFort) } MessageOfFort::MessageOfFort(const MessageOfFort& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfFort* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, decltype(_impl_.team_id_){}, decltype(_impl_.hp_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfFort) } - inline void MessageOfFort::SharedCtor(::_pb::Arena* arena) + inline void MessageOfFort::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.hp_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.hp_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfFort::~MessageOfFort() @@ -3649,7 +3023,7 @@ namespace protobuf inline void MessageOfFort::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfFort::SetCachedSize(int size) const @@ -3660,11 +3034,11 @@ namespace protobuf void MessageOfFort::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfFort) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused 
(void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -3675,57 +3049,49 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 hp = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -3753,48 +3119,40 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfFort::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfFort::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // 
@@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfFort) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // int32 hp = 3; if (this->_internal_hp() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_hp(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_hp(), target); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 4, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -3807,45 +3165,37 @@ namespace protobuf return target; } - ::size_t MessageOfFort::ByteSizeLong() const + size_t MessageOfFort::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfFort) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if 
(this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // int32 hp = 3; if (this->_internal_hp() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_hp() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_hp()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -3864,8 +3214,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfFort) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) @@ -3918,6 +3268,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[5] ); } + // =================================================================== class MessageOfWormhole::_Internal @@ -3925,36 +3276,33 @@ namespace protobuf public: }; - MessageOfWormhole::MessageOfWormhole(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfWormhole::MessageOfWormhole(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfWormhole) } MessageOfWormhole::MessageOfWormhole(const MessageOfWormhole& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - 
_impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfWormhole* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, decltype(_impl_.hp_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfWormhole) } - inline void MessageOfWormhole::SharedCtor(::_pb::Arena* arena) + inline void MessageOfWormhole::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.hp_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, decltype(_impl_.hp_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfWormhole::~MessageOfWormhole() @@ -3970,7 +3318,7 @@ namespace protobuf inline void MessageOfWormhole::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfWormhole::SetCachedSize(int size) const @@ -3981,11 +3329,11 @@ namespace protobuf void MessageOfWormhole::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfWormhole) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); 
_internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -3996,45 +3344,39 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 hp = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -4062,39 +3404,33 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfWormhole::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfWormhole::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfWormhole) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), 
target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // int32 hp = 3; if (this->_internal_hp() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_hp(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_hp(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -4107,37 +3443,31 @@ namespace protobuf return target; } - ::size_t MessageOfWormhole::ByteSizeLong() const + size_t MessageOfWormhole::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfWormhole) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int32 hp = 3; if (this->_internal_hp() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_hp() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_hp()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -4156,8 +3486,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfWormhole) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) 
@@ -4206,6 +3536,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[6] ); } + // =================================================================== class MessageOfResource::_Internal @@ -4213,36 +3544,33 @@ namespace protobuf public: }; - MessageOfResource::MessageOfResource(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfResource::MessageOfResource(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfResource) } MessageOfResource::MessageOfResource(const MessageOfResource& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfResource* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, decltype(_impl_.progress_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.progress_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.progress_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfResource) } - inline void MessageOfResource::SharedCtor(::_pb::Arena* arena) + inline void MessageOfResource::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.progress_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, 
decltype(_impl_.progress_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfResource::~MessageOfResource() @@ -4258,7 +3586,7 @@ namespace protobuf inline void MessageOfResource::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfResource::SetCachedSize(int size) const @@ -4269,11 +3597,11 @@ namespace protobuf void MessageOfResource::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfResource) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.progress_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.progress_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.progress_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.progress_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -4284,45 +3612,39 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 progress = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.progress_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } 
continue; default: goto handle_unusual; @@ -4350,39 +3672,33 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfResource::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfResource::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfResource) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // int32 progress = 3; if (this->_internal_progress() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_progress(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_progress(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -4395,37 +3711,31 @@ namespace protobuf return target; } - ::size_t MessageOfResource::ByteSizeLong() const + size_t MessageOfResource::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfResource) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - 
this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int32 progress = 3; if (this->_internal_progress() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_progress() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_progress()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -4444,8 +3754,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfResource) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) @@ -4494,6 +3804,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[7] ); } + // =================================================================== class MessageOfHome::_Internal @@ -4501,39 +3812,33 @@ namespace protobuf public: }; - MessageOfHome::MessageOfHome(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfHome::MessageOfHome(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfHome) } MessageOfHome::MessageOfHome(const MessageOfHome& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfHome* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, decltype(_impl_.team_id_){}, decltype(_impl_.hp_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfHome) } - inline void MessageOfHome::SharedCtor(::_pb::Arena* arena) + inline void MessageOfHome::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.hp_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.hp_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfHome::~MessageOfHome() @@ -4549,7 +3854,7 @@ namespace protobuf inline void MessageOfHome::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfHome::SetCachedSize(int size) const @@ -4560,11 +3865,11 @@ namespace protobuf void MessageOfHome::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfHome) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.hp_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.hp_) - reinterpret_cast(&_impl_.x_)) + 
sizeof(_impl_.hp_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -4575,57 +3880,49 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 hp = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -4653,48 +3950,40 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfHome::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfHome::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfHome) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 
1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // int32 hp = 3; if (this->_internal_hp() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_hp(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_hp(), target); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 4, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -4707,45 +3996,37 @@ namespace protobuf return target; } - ::size_t MessageOfHome::ByteSizeLong() const + size_t MessageOfHome::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfHome) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { - total_size += 
::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // int32 hp = 3; if (this->_internal_hp() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_hp() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_hp()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -4764,8 +4045,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfHome) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) @@ -4818,6 +4099,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[8] ); } + // =================================================================== class MessageOfMap_Row::_Internal @@ -4825,10 +4107,10 @@ namespace protobuf public: }; - MessageOfMap_Row::MessageOfMap_Row(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfMap_Row::MessageOfMap_Row(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfMap.Row) } MessageOfMap_Row::MessageOfMap_Row(const MessageOfMap_Row& from) : @@ -4837,23 +4119,20 @@ namespace protobuf MessageOfMap_Row* const _this = this; (void)_this; new (&_impl_) Impl_{ - decltype(_impl_.cols_){from._internal_cols()}, /*decltype(_impl_._cols_cached_byte_size_)*/ {0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.cols_){from._impl_.cols_}, 
/*decltype(_impl_._cols_cached_byte_size_)*/ {0}, /*decltype(_impl_._cached_size_)*/ {}}; _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfMap.Row) } - inline void MessageOfMap_Row::SharedCtor(::_pb::Arena* arena) + inline void MessageOfMap_Row::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.cols_){arena}, /*decltype(_impl_._cols_cached_byte_size_)*/ {0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.cols_){arena}, /*decltype(_impl_._cols_cached_byte_size_)*/ {0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfMap_Row::~MessageOfMap_Row() @@ -4869,8 +4148,8 @@ namespace protobuf inline void MessageOfMap_Row::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); - _internal_mutable_cols()->~RepeatedField(); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + _impl_.cols_.~RepeatedField(); } void MessageOfMap_Row::SetCachedSize(int size) const @@ -4881,11 +4160,11 @@ namespace protobuf void MessageOfMap_Row::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfMap.Row) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - _internal_mutable_cols()->Clear(); + _impl_.cols_.Clear(); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -4896,27 +4175,25 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // repeated .protobuf.PlaceType cols = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 10)) { ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedEnumParser(_internal_mutable_cols(), ptr, ctx); CHK_(ptr); } - else if 
(static_cast<::uint8_t>(tag) == 8) + else if (static_cast(tag) == 8) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_add_cols(static_cast<::protobuf::PlaceType>(val)); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -4944,20 +4221,22 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfMap_Row::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfMap_Row::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfMap.Row) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // repeated .protobuf.PlaceType cols = 1; { - int byte_size = _impl_._cols_cached_byte_size_.Get(); + int byte_size = _impl_._cols_cached_byte_size_.load(std::memory_order_relaxed); if (byte_size > 0) { - target = stream->WriteEnumPacked(1, _internal_cols(), byte_size, target); + target = stream->WriteEnumPacked( + 1, _impl_.cols_, byte_size, target + ); } } @@ -4971,35 +4250,33 @@ namespace protobuf return target; } - ::size_t MessageOfMap_Row::ByteSizeLong() const + size_t MessageOfMap_Row::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfMap.Row) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // repeated .protobuf.PlaceType cols = 1; { - std::size_t data_size = 0; - auto count = static_cast(this->_internal_cols_size()); - - for (std::size_t i = 0; i < count; ++i) + size_t data_size = 0; + unsigned int count = static_cast(this->_internal_cols_size()); + for (unsigned int i = 0; i < count; i++) { data_size += 
::_pbi::WireFormatLite::EnumSize( this->_internal_cols(static_cast(i)) ); } - total_size += data_size; if (data_size > 0) { - total_size += 1; - total_size += ::_pbi::WireFormatLite::Int32Size( - static_cast(data_size) - ); + total_size += 1 + + ::_pbi::WireFormatLite::Int32Size(static_cast(data_size)); } - _impl_._cols_cached_byte_size_.Set(::_pbi::ToCachedSize(data_size)); + int cached_size = ::_pbi::ToCachedSize(data_size); + _impl_._cols_cached_byte_size_.store(cached_size, std::memory_order_relaxed); + total_size += data_size; } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -5018,11 +4295,11 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfMap.Row) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; - _this->_internal_mutable_cols()->MergeFrom(from._internal_cols()); + _this->_impl_.cols_.MergeFrom(from._impl_.cols_); _this->_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -5044,9 +4321,7 @@ namespace protobuf { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - _internal_mutable_cols()->InternalSwap( - other->_internal_mutable_cols() - ); + _impl_.cols_.InternalSwap(&other->_impl_.cols_); } ::PROTOBUF_NAMESPACE_ID::Metadata MessageOfMap_Row::GetMetadata() const @@ -5055,6 +4330,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[9] ); } + // =================================================================== class MessageOfMap::_Internal @@ -5062,10 +4338,10 @@ namespace protobuf public: }; - MessageOfMap::MessageOfMap(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + 
MessageOfMap::MessageOfMap(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfMap) } MessageOfMap::MessageOfMap(const MessageOfMap& from) : @@ -5074,30 +4350,21 @@ namespace protobuf MessageOfMap* const _this = this; (void)_this; new (&_impl_) Impl_{ - decltype(_impl_.rows_){from._impl_.rows_}, decltype(_impl_.height_){} - - , - decltype(_impl_.width_){} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.rows_){from._impl_.rows_}, decltype(_impl_.height_){}, decltype(_impl_.width_){}, /*decltype(_impl_._cached_size_)*/ {}}; _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - ::memcpy(&_impl_.height_, &from._impl_.height_, static_cast<::size_t>(reinterpret_cast(&_impl_.width_) - reinterpret_cast(&_impl_.height_)) + sizeof(_impl_.width_)); + ::memcpy(&_impl_.height_, &from._impl_.height_, static_cast(reinterpret_cast(&_impl_.width_) - reinterpret_cast(&_impl_.height_)) + sizeof(_impl_.width_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfMap) } - inline void MessageOfMap::SharedCtor(::_pb::Arena* arena) + inline void MessageOfMap::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.rows_){arena}, decltype(_impl_.height_){0u} - - , - decltype(_impl_.width_){0u} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.rows_){arena}, decltype(_impl_.height_){0u}, decltype(_impl_.width_){0u}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfMap::~MessageOfMap() @@ -5113,8 +4380,8 @@ namespace protobuf inline void MessageOfMap::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); - _internal_mutable_rows()->~RepeatedPtrField(); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + 
_impl_.rows_.~RepeatedPtrField(); } void MessageOfMap::SetCachedSize(int size) const @@ -5125,12 +4392,12 @@ namespace protobuf void MessageOfMap::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfMap) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - _internal_mutable_rows()->Clear(); - ::memset(&_impl_.height_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.width_) - reinterpret_cast(&_impl_.height_)) + sizeof(_impl_.width_)); + _impl_.rows_.Clear(); + ::memset(&_impl_.height_, 0, static_cast(reinterpret_cast(&_impl_.width_) - reinterpret_cast(&_impl_.height_)) + sizeof(_impl_.width_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -5141,37 +4408,33 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // uint32 height = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.height_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // uint32 width = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.width_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // repeated .protobuf.MessageOfMap.Row rows = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 26)) { ptr -= 1; do @@ -5184,9 +4447,7 @@ namespace protobuf } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<26>(ptr)); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -5214,30 +4475,26 @@ namespace protobuf #undef CHK_ } - ::uint8_t* 
MessageOfMap::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfMap::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfMap) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // uint32 height = 1; if (this->_internal_height() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteUInt32ToArray( - 1, this->_internal_height(), target - ); + target = ::_pbi::WireFormatLite::WriteUInt32ToArray(1, this->_internal_height(), target); } // uint32 width = 2; if (this->_internal_width() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteUInt32ToArray( - 2, this->_internal_width(), target - ); + target = ::_pbi::WireFormatLite::WriteUInt32ToArray(2, this->_internal_width(), target); } // repeated .protobuf.MessageOfMap.Row rows = 3; @@ -5261,18 +4518,18 @@ namespace protobuf return target; } - ::size_t MessageOfMap::ByteSizeLong() const + size_t MessageOfMap::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfMap) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // repeated .protobuf.MessageOfMap.Row rows = 3; total_size += 1UL * this->_internal_rows_size(); - for (const auto& msg : this->_internal_rows()) + for (const auto& msg : this->_impl_.rows_) { total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); @@ -5281,17 +4538,13 @@ namespace protobuf // uint32 height = 1; if (this->_internal_height() != 0) { - total_size += ::_pbi::WireFormatLite::UInt32SizePlusOne( - this->_internal_height() - ); + total_size += 
::_pbi::WireFormatLite::UInt32SizePlusOne(this->_internal_height()); } // uint32 width = 2; if (this->_internal_width() != 0) { - total_size += ::_pbi::WireFormatLite::UInt32SizePlusOne( - this->_internal_width() - ); + total_size += ::_pbi::WireFormatLite::UInt32SizePlusOne(this->_internal_width()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -5310,11 +4563,11 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfMap) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; - _this->_internal_mutable_rows()->MergeFrom(from._internal_rows()); + _this->_impl_.rows_.MergeFrom(from._impl_.rows_); if (from._internal_height() != 0) { _this->_internal_set_height(from._internal_height()); @@ -5344,7 +4597,7 @@ namespace protobuf { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - _internal_mutable_rows()->InternalSwap(other->_internal_mutable_rows()); + _impl_.rows_.InternalSwap(&other->_impl_.rows_); ::PROTOBUF_NAMESPACE_ID::internal::memswap< PROTOBUF_FIELD_OFFSET(MessageOfMap, _impl_.width_) + sizeof(MessageOfMap::_impl_.width_) - PROTOBUF_FIELD_OFFSET(MessageOfMap, _impl_.height_)>( reinterpret_cast(&_impl_.height_), @@ -5358,6 +4611,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[10] ); } + // =================================================================== class MessageOfTeam::_Internal @@ -5365,42 +4619,33 @@ namespace protobuf public: }; - MessageOfTeam::MessageOfTeam(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfTeam::MessageOfTeam(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + 
::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfTeam) } MessageOfTeam::MessageOfTeam(const MessageOfTeam& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfTeam* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.team_id_){}, decltype(_impl_.player_id_){}, decltype(_impl_.score_){}, decltype(_impl_.money_){}, decltype(_impl_.guid_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.team_id_, &from._impl_.team_id_, static_cast(reinterpret_cast(&_impl_.guid_) - reinterpret_cast(&_impl_.team_id_)) + sizeof(_impl_.guid_)); // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfTeam) } - inline void MessageOfTeam::SharedCtor(::_pb::Arena* arena) + inline void MessageOfTeam::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.score_){0} - - , - decltype(_impl_.money_){0} - - , - decltype(_impl_.guid_){::int64_t{0}} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.score_){0}, decltype(_impl_.money_){0}, decltype(_impl_.guid_){int64_t{0}}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfTeam::~MessageOfTeam() @@ -5416,7 +4661,7 @@ namespace protobuf inline void MessageOfTeam::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfTeam::SetCachedSize(int size) 
const @@ -5427,11 +4672,11 @@ namespace protobuf void MessageOfTeam::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfTeam) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.team_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.guid_) - reinterpret_cast(&_impl_.team_id_)) + sizeof(_impl_.guid_)); + ::memset(&_impl_.team_id_, 0, static_cast(reinterpret_cast(&_impl_.guid_) - reinterpret_cast(&_impl_.team_id_)) + sizeof(_impl_.guid_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -5442,69 +4687,59 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 team_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 player_id = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 score = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.score_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 money = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.money_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 guid = 5; case 5: - if 
(PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 40)) { _impl_.guid_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -5532,57 +4767,47 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfTeam::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfTeam::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfTeam) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 team_id = 1; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_team_id(), target); } // int64 player_id = 2; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 2, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_player_id(), target); } // int32 score = 3; if (this->_internal_score() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_score(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_score(), target); } // int32 money = 4; if (this->_internal_money() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 4, this->_internal_money(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(4, this->_internal_money(), target); } // int64 guid = 5; if (this->_internal_guid() != 0) { target = stream->EnsureSpace(target); - 
target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 5, this->_internal_guid(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_guid(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -5595,53 +4820,43 @@ namespace protobuf return target; } - ::size_t MessageOfTeam::ByteSizeLong() const + size_t MessageOfTeam::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfTeam) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 team_id = 1; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // int64 player_id = 2; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int32 score = 3; if (this->_internal_score() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_score() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_score()); } // int32 money = 4; if (this->_internal_money() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_money() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_money()); } // int64 guid = 5; if (this->_internal_guid() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_guid() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_guid()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -5660,8 +4875,8 @@ namespace protobuf auto* const _this = 
static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfTeam) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_team_id() != 0) @@ -5718,13 +4933,12 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[11] ); } + // =================================================================== class MessageOfObj::_Internal { public: - static constexpr ::int32_t kOneofCaseOffset = - PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfObj, _impl_._oneof_case_); static const ::protobuf::MessageOfShip& ship_message(const MessageOfObj* msg); static const ::protobuf::MessageOfBullet& bullet_message(const MessageOfObj* msg); static const ::protobuf::MessageOfFactory& factory_message(const MessageOfObj* msg); @@ -6027,10 +5241,10 @@ namespace protobuf } // @@protoc_insertion_point(field_set_allocated:protobuf.MessageOfObj.team_message) } - MessageOfObj::MessageOfObj(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfObj::MessageOfObj(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfObj) } MessageOfObj::MessageOfObj(const MessageOfObj& from) : @@ -6137,9 +5351,12 @@ namespace protobuf // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfObj) } - inline void MessageOfObj::SharedCtor(::_pb::Arena* arena) + inline void MessageOfObj::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ decltype(_impl_.message_of_obj_){}, /*decltype(_impl_._cached_size_)*/ {}, 
/*decltype(_impl_._oneof_case_)*/ {}}; clear_has_message_of_obj(); @@ -6158,7 +5375,7 @@ namespace protobuf inline void MessageOfObj::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); if (has_message_of_obj()) { clear_message_of_obj(); @@ -6282,7 +5499,7 @@ namespace protobuf void MessageOfObj::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfObj) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; @@ -6297,153 +5514,129 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // .protobuf.MessageOfShip ship_message = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 10)) { ptr = ctx->ParseMessage(_internal_mutable_ship_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfBullet bullet_message = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 18)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 18)) { ptr = ctx->ParseMessage(_internal_mutable_bullet_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfFactory factory_message = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 26)) { ptr = ctx->ParseMessage(_internal_mutable_factory_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfCommunity community_message = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 34)) { ptr = ctx->ParseMessage(_internal_mutable_community_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // 
.protobuf.MessageOfFort fort_message = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 42)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 42)) { ptr = ctx->ParseMessage(_internal_mutable_fort_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfWormhole wormhole_message = 6; case 6: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 50)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 50)) { ptr = ctx->ParseMessage(_internal_mutable_wormhole_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfHome home_message = 7; case 7: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 58)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 58)) { ptr = ctx->ParseMessage(_internal_mutable_home_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfResource resource_message = 8; case 8: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 66)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 66)) { ptr = ctx->ParseMessage(_internal_mutable_resource_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfMap map_message = 9; case 9: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 74)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 74)) { ptr = ctx->ParseMessage(_internal_mutable_map_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfNews news_message = 10; case 10: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 82)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 82)) { ptr = ctx->ParseMessage(_internal_mutable_news_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfBombedBullet bombed_bullet_message = 11; case 11: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 90)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 90)) { ptr = 
ctx->ParseMessage(_internal_mutable_bombed_bullet_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfTeam team_message = 12; case 12: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 98)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 98)) { ptr = ctx->ParseMessage(_internal_mutable_team_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -6471,90 +5664,98 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfObj::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream - ) const - { - // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfObj) - ::uint32_t cached_has_bits = 0; - (void)cached_has_bits; + uint8_t* MessageOfObj::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + ) const + { + // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfObj) + uint32_t cached_has_bits = 0; + (void)cached_has_bits; + + // .protobuf.MessageOfShip ship_message = 1; + if (_internal_has_ship_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(1, _Internal::ship_message(this), _Internal::ship_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfBullet bullet_message = 2; + if (_internal_has_bullet_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(2, _Internal::bullet_message(this), _Internal::bullet_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfFactory factory_message = 3; + if (_internal_has_factory_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(3, _Internal::factory_message(this), _Internal::factory_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfCommunity community_message = 4; + if 
(_internal_has_community_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(4, _Internal::community_message(this), _Internal::community_message(this).GetCachedSize(), target, stream); + } - switch (message_of_obj_case()) + // .protobuf.MessageOfFort fort_message = 5; + if (_internal_has_fort_message()) { - case kShipMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(1, _Internal::ship_message(this), _Internal::ship_message(this).GetCachedSize(), target, stream); - break; - } - case kBulletMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(2, _Internal::bullet_message(this), _Internal::bullet_message(this).GetCachedSize(), target, stream); - break; - } - case kFactoryMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(3, _Internal::factory_message(this), _Internal::factory_message(this).GetCachedSize(), target, stream); - break; - } - case kCommunityMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(4, _Internal::community_message(this), _Internal::community_message(this).GetCachedSize(), target, stream); - break; - } - case kFortMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(5, _Internal::fort_message(this), _Internal::fort_message(this).GetCachedSize(), target, stream); - break; - } - case kWormholeMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(6, _Internal::wormhole_message(this), _Internal::wormhole_message(this).GetCachedSize(), target, stream); - break; - } - case kHomeMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(7, _Internal::home_message(this), _Internal::home_message(this).GetCachedSize(), target, stream); - break; - } - case kResourceMessage: - { - target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(8, _Internal::resource_message(this), _Internal::resource_message(this).GetCachedSize(), target, stream); - break; - } - case kMapMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(9, _Internal::map_message(this), _Internal::map_message(this).GetCachedSize(), target, stream); - break; - } - case kNewsMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(10, _Internal::news_message(this), _Internal::news_message(this).GetCachedSize(), target, stream); - break; - } - case kBombedBulletMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(11, _Internal::bombed_bullet_message(this), _Internal::bombed_bullet_message(this).GetCachedSize(), target, stream); - break; - } - case kTeamMessage: - { - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: - InternalWriteMessage(12, _Internal::team_message(this), _Internal::team_message(this).GetCachedSize(), target, stream); - break; - } - default:; + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(5, _Internal::fort_message(this), _Internal::fort_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfWormhole wormhole_message = 6; + if (_internal_has_wormhole_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(6, _Internal::wormhole_message(this), _Internal::wormhole_message(this).GetCachedSize(), target, stream); } + + // .protobuf.MessageOfHome home_message = 7; + if (_internal_has_home_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(7, _Internal::home_message(this), _Internal::home_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfResource resource_message = 8; + if (_internal_has_resource_message()) + { + target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(8, _Internal::resource_message(this), _Internal::resource_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfMap map_message = 9; + if (_internal_has_map_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(9, _Internal::map_message(this), _Internal::map_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfNews news_message = 10; + if (_internal_has_news_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(10, _Internal::news_message(this), _Internal::news_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfBombedBullet bombed_bullet_message = 11; + if (_internal_has_bombed_bullet_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(11, _Internal::bombed_bullet_message(this), _Internal::bombed_bullet_message(this).GetCachedSize(), target, stream); + } + + // .protobuf.MessageOfTeam team_message = 12; + if (_internal_has_team_message()) + { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(12, _Internal::team_message(this), _Internal::team_message(this).GetCachedSize(), target, stream); + } + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::_pbi::WireFormat::InternalSerializeUnknownFieldsToArray( @@ -6565,12 +5766,12 @@ namespace protobuf return target; } - ::size_t MessageOfObj::ByteSizeLong() const + size_t MessageOfObj::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfObj) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; @@ -6705,8 +5906,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); 
auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfObj) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; switch (from.message_of_obj_case()) @@ -6831,6 +6032,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[12] ); } + // =================================================================== class MessageOfAll::_Internal @@ -6838,36 +6040,33 @@ namespace protobuf public: }; - MessageOfAll::MessageOfAll(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageOfAll::MessageOfAll(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfAll) } MessageOfAll::MessageOfAll(const MessageOfAll& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MessageOfAll* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.game_time_){}, decltype(_impl_.red_team_score_){}, decltype(_impl_.blue_team_score_){}, decltype(_impl_.red_team_money_){}, decltype(_impl_.blue_team_money_){}, decltype(_impl_.red_home_hp_){}, decltype(_impl_.blue_home_hp_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.game_time_, &from._impl_.game_time_, static_cast(reinterpret_cast(&_impl_.blue_home_hp_) - reinterpret_cast(&_impl_.game_time_)) + sizeof(_impl_.blue_home_hp_)); // 
@@protoc_insertion_point(copy_constructor:protobuf.MessageOfAll) } - inline void MessageOfAll::SharedCtor(::_pb::Arena* arena) + inline void MessageOfAll::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.game_time_){0} - - , - decltype(_impl_.red_team_score_){0} - - , - decltype(_impl_.blue_team_score_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.game_time_){0}, decltype(_impl_.red_team_score_){0}, decltype(_impl_.blue_team_score_){0}, decltype(_impl_.red_team_money_){0}, decltype(_impl_.blue_team_money_){0}, decltype(_impl_.red_home_hp_){0}, decltype(_impl_.blue_home_hp_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageOfAll::~MessageOfAll() @@ -6883,7 +6082,7 @@ namespace protobuf inline void MessageOfAll::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MessageOfAll::SetCachedSize(int size) const @@ -6894,11 +6093,11 @@ namespace protobuf void MessageOfAll::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfAll) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.game_time_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.blue_team_score_) - reinterpret_cast(&_impl_.game_time_)) + sizeof(_impl_.blue_team_score_)); + ::memset(&_impl_.game_time_, 0, static_cast(reinterpret_cast(&_impl_.blue_home_hp_) - reinterpret_cast(&_impl_.game_time_)) + sizeof(_impl_.blue_home_hp_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -6909,45 +6108,79 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 game_time = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if 
(PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.game_time_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 red_team_score = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.red_team_score_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 blue_team_score = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.blue_team_score_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else + goto handle_unusual; + continue; + // int32 red_team_money = 4; + case 4: + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) + { + _impl_.red_team_money_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + CHK_(ptr); + } + else + goto handle_unusual; + continue; + // int32 blue_team_money = 5; + case 5: + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 40)) + { + _impl_.blue_team_money_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + CHK_(ptr); + } + else + goto handle_unusual; + continue; + // int32 red_home_hp = 6; + case 6: + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 48)) { + _impl_.red_home_hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + CHK_(ptr); + } + else goto handle_unusual; + continue; + // int32 blue_home_hp = 7; + case 7: + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 56)) + { + _impl_.blue_home_hp_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + CHK_(ptr); } + else + goto handle_unusual; continue; default: goto handle_unusual; @@ -6975,39 +6208,61 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfAll::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfAll::_InternalSerialize( + uint8_t* target, 
::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfAll) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 game_time = 1; if (this->_internal_game_time() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_game_time(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_game_time(), target); } // int32 red_team_score = 2; if (this->_internal_red_team_score() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_red_team_score(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_red_team_score(), target); } // int32 blue_team_score = 3; if (this->_internal_blue_team_score() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 3, this->_internal_blue_team_score(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(3, this->_internal_blue_team_score(), target); + } + + // int32 red_team_money = 4; + if (this->_internal_red_team_money() != 0) + { + target = stream->EnsureSpace(target); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(4, this->_internal_red_team_money(), target); + } + + // int32 blue_team_money = 5; + if (this->_internal_blue_team_money() != 0) + { + target = stream->EnsureSpace(target); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(5, this->_internal_blue_team_money(), target); + } + + // int32 red_home_hp = 6; + if (this->_internal_red_home_hp() != 0) + { + target = stream->EnsureSpace(target); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(6, this->_internal_red_home_hp(), target); + } + + // int32 blue_home_hp = 7; + if (this->_internal_blue_home_hp() != 0) + { + target = stream->EnsureSpace(target); + target = 
::_pbi::WireFormatLite::WriteInt32ToArray(7, this->_internal_blue_home_hp(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -7020,37 +6275,55 @@ namespace protobuf return target; } - ::size_t MessageOfAll::ByteSizeLong() const + size_t MessageOfAll::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfAll) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 game_time = 1; if (this->_internal_game_time() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_game_time() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_game_time()); } // int32 red_team_score = 2; if (this->_internal_red_team_score() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_red_team_score() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_red_team_score()); } // int32 blue_team_score = 3; if (this->_internal_blue_team_score() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_blue_team_score() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_blue_team_score()); + } + + // int32 red_team_money = 4; + if (this->_internal_red_team_money() != 0) + { + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_red_team_money()); + } + + // int32 blue_team_money = 5; + if (this->_internal_blue_team_money() != 0) + { + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_blue_team_money()); + } + + // int32 red_home_hp = 6; + if (this->_internal_red_home_hp() != 0) + { + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_red_home_hp()); + } + + // int32 blue_home_hp = 7; + if (this->_internal_blue_home_hp() != 0) + { + total_size += 
::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_blue_home_hp()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -7069,8 +6342,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfAll) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_game_time() != 0) @@ -7085,6 +6358,22 @@ namespace protobuf { _this->_internal_set_blue_team_score(from._internal_blue_team_score()); } + if (from._internal_red_team_money() != 0) + { + _this->_internal_set_red_team_money(from._internal_red_team_money()); + } + if (from._internal_blue_team_money() != 0) + { + _this->_internal_set_blue_team_money(from._internal_blue_team_money()); + } + if (from._internal_red_home_hp() != 0) + { + _this->_internal_set_red_home_hp(from._internal_red_home_hp()); + } + if (from._internal_blue_home_hp() != 0) + { + _this->_internal_set_blue_home_hp(from._internal_blue_home_hp()); + } _this->_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -7107,7 +6396,7 @@ namespace protobuf using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); ::PROTOBUF_NAMESPACE_ID::internal::memswap< - PROTOBUF_FIELD_OFFSET(MessageOfAll, _impl_.blue_team_score_) + sizeof(MessageOfAll::_impl_.blue_team_score_) - PROTOBUF_FIELD_OFFSET(MessageOfAll, _impl_.game_time_)>( + PROTOBUF_FIELD_OFFSET(MessageOfAll, _impl_.blue_home_hp_) + sizeof(MessageOfAll::_impl_.blue_home_hp_) - PROTOBUF_FIELD_OFFSET(MessageOfAll, _impl_.game_time_)>( reinterpret_cast(&_impl_.game_time_), reinterpret_cast(&other->_impl_.game_time_) ); @@ -7119,19 +6408,13 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, 
file_level_metadata_Message2Clients_2eproto[13] ); } + // =================================================================== class MessageToClient::_Internal { public: - using HasBits = decltype(std::declval()._impl_._has_bits_); - static constexpr ::int32_t kHasBitsOffset = - 8 * PROTOBUF_FIELD_OFFSET(MessageToClient, _impl_._has_bits_); static const ::protobuf::MessageOfAll& all_message(const MessageToClient* msg); - static void set_has_all_message(HasBits* has_bits) - { - (*has_bits)[0] |= 1u; - } }; const ::protobuf::MessageOfAll& @@ -7139,10 +6422,10 @@ namespace protobuf { return *msg->_impl_.all_message_; } - MessageToClient::MessageToClient(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MessageToClient::MessageToClient(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageToClient) } MessageToClient::MessageToClient(const MessageToClient& from) : @@ -7151,10 +6434,10 @@ namespace protobuf MessageToClient* const _this = this; (void)_this; new (&_impl_) Impl_{ - decltype(_impl_._has_bits_){from._impl_._has_bits_}, /*decltype(_impl_._cached_size_)*/ {}, decltype(_impl_.obj_message_){from._impl_.obj_message_}, decltype(_impl_.all_message_){nullptr}, decltype(_impl_.game_state_){}}; + decltype(_impl_.obj_message_){from._impl_.obj_message_}, decltype(_impl_.all_message_){nullptr}, decltype(_impl_.game_state_){}, /*decltype(_impl_._cached_size_)*/ {}}; _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - if ((from._impl_._has_bits_[0] & 0x00000001u) != 0) + if (from._internal_has_all_message()) { _this->_impl_.all_message_ = new ::protobuf::MessageOfAll(*from._impl_.all_message_); } @@ -7162,13 +6445,14 @@ namespace protobuf // 
@@protoc_insertion_point(copy_constructor:protobuf.MessageToClient) } - inline void MessageToClient::SharedCtor(::_pb::Arena* arena) + inline void MessageToClient::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_._has_bits_){}, /*decltype(_impl_._cached_size_)*/ {}, decltype(_impl_.obj_message_){arena}, decltype(_impl_.all_message_){nullptr}, decltype(_impl_.game_state_){0} - - }; + decltype(_impl_.obj_message_){arena}, decltype(_impl_.all_message_){nullptr}, decltype(_impl_.game_state_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } MessageToClient::~MessageToClient() @@ -7184,8 +6468,8 @@ namespace protobuf inline void MessageToClient::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); - _internal_mutable_obj_message()->~RepeatedPtrField(); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + _impl_.obj_message_.~RepeatedPtrField(); if (this != internal_default_instance()) delete _impl_.all_message_; } @@ -7198,19 +6482,17 @@ namespace protobuf void MessageToClient::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageToClient) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - _internal_mutable_obj_message()->Clear(); - cached_has_bits = _impl_._has_bits_[0]; - if (cached_has_bits & 0x00000001u) + _impl_.obj_message_.Clear(); + if (GetArenaForAllocation() == nullptr && _impl_.all_message_ != nullptr) { - ABSL_DCHECK(_impl_.all_message_ != nullptr); - _impl_.all_message_->Clear(); + delete _impl_.all_message_; } + _impl_.all_message_ = nullptr; _impl_.game_state_ = 0; - _impl_._has_bits_.Clear(); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -7219,16 +6501,15 @@ namespace protobuf #define CHK_(x) \ if (PROTOBUF_PREDICT_FALSE(!(x))) \ goto failure - _Internal::HasBits has_bits{}; while (!ctx->Done(&ptr)) { 
- ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // repeated .protobuf.MessageOfObj obj_message = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 10)) { ptr -= 1; do @@ -7241,34 +6522,28 @@ namespace protobuf } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); } else - { goto handle_unusual; - } continue; // .protobuf.GameState game_state = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_game_state(static_cast<::protobuf::GameState>(val)); } else - { goto handle_unusual; - } continue; // .protobuf.MessageOfAll all_message = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 26)) { ptr = ctx->ParseMessage(_internal_mutable_all_message(), ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -7289,7 +6564,6 @@ namespace protobuf CHK_(ptr != nullptr); } // while message_done: - _impl_._has_bits_.Or(has_bits); return ptr; failure: ptr = nullptr; @@ -7297,12 +6571,12 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageToClient::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageToClient::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageToClient) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // repeated .protobuf.MessageOfObj obj_message = 1; @@ -7325,9 +6599,8 @@ namespace protobuf ); } - cached_has_bits = _impl_._has_bits_[0]; // .protobuf.MessageOfAll 
all_message = 3; - if (cached_has_bits & 0x00000001u) + if (this->_internal_has_all_message()) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: InternalWriteMessage(3, _Internal::all_message(this), _Internal::all_message(this).GetCachedSize(), target, stream); @@ -7343,26 +6616,25 @@ namespace protobuf return target; } - ::size_t MessageToClient::ByteSizeLong() const + size_t MessageToClient::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageToClient) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // repeated .protobuf.MessageOfObj obj_message = 1; total_size += 1UL * this->_internal_obj_message_size(); - for (const auto& msg : this->_internal_obj_message()) + for (const auto& msg : this->_impl_.obj_message_) { total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); } // .protobuf.MessageOfAll all_message = 3; - cached_has_bits = _impl_._has_bits_[0]; - if (cached_has_bits & 0x00000001u) + if (this->_internal_has_all_message()) { total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( @@ -7393,12 +6665,12 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageToClient) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; - _this->_internal_mutable_obj_message()->MergeFrom(from._internal_obj_message()); - if ((from._impl_._has_bits_[0] & 0x00000001u) != 0) + _this->_impl_.obj_message_.MergeFrom(from._impl_.obj_message_); + if (from._internal_has_all_message()) { _this->_internal_mutable_all_message()->::protobuf::MessageOfAll::MergeFrom( from._internal_all_message() @@ -7429,8 +6701,7 
@@ namespace protobuf { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - swap(_impl_._has_bits_[0], other->_impl_._has_bits_[0]); - _internal_mutable_obj_message()->InternalSwap(other->_internal_mutable_obj_message()); + _impl_.obj_message_.InternalSwap(&other->_impl_.obj_message_); ::PROTOBUF_NAMESPACE_ID::internal::memswap< PROTOBUF_FIELD_OFFSET(MessageToClient, _impl_.game_state_) + sizeof(MessageToClient::_impl_.game_state_) - PROTOBUF_FIELD_OFFSET(MessageToClient, _impl_.all_message_)>( reinterpret_cast(&_impl_.all_message_), @@ -7444,6 +6715,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[14] ); } + // =================================================================== class MoveRes::_Internal @@ -7451,36 +6723,33 @@ namespace protobuf public: }; - MoveRes::MoveRes(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MoveRes::MoveRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MoveRes) } MoveRes::MoveRes(const MoveRes& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MoveRes* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.actual_speed_){}, decltype(_impl_.actual_angle_){}, decltype(_impl_.act_success_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.actual_speed_, &from._impl_.actual_speed_, static_cast(reinterpret_cast(&_impl_.act_success_) - 
reinterpret_cast(&_impl_.actual_speed_)) + sizeof(_impl_.act_success_)); // @@protoc_insertion_point(copy_constructor:protobuf.MoveRes) } - inline void MoveRes::SharedCtor(::_pb::Arena* arena) + inline void MoveRes::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.actual_speed_){::int64_t{0}} - - , - decltype(_impl_.actual_angle_){0} - - , - decltype(_impl_.act_success_){false} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.actual_speed_){int64_t{0}}, decltype(_impl_.actual_angle_){0}, decltype(_impl_.act_success_){false}, /*decltype(_impl_._cached_size_)*/ {}}; } MoveRes::~MoveRes() @@ -7496,7 +6765,7 @@ namespace protobuf inline void MoveRes::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MoveRes::SetCachedSize(int size) const @@ -7507,11 +6776,11 @@ namespace protobuf void MoveRes::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MoveRes) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.actual_speed_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.act_success_) - reinterpret_cast(&_impl_.actual_speed_)) + sizeof(_impl_.act_success_)); + ::memset(&_impl_.actual_speed_, 0, static_cast(reinterpret_cast(&_impl_.act_success_) - reinterpret_cast(&_impl_.actual_speed_)) + sizeof(_impl_.act_success_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -7522,45 +6791,39 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 actual_speed = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.actual_speed_ = 
::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // double actual_angle = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 17)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 17)) { _impl_.actual_angle_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; // bool act_success = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.act_success_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -7588,43 +6851,37 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MoveRes::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MoveRes::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MoveRes) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 actual_speed = 1; if (this->_internal_actual_speed() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_actual_speed(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_actual_speed(), target); } // double actual_angle = 2; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_actual_angle = this->_internal_actual_angle(); - ::uint64_t raw_actual_angle; + uint64_t raw_actual_angle; memcpy(&raw_actual_angle, &tmp_actual_angle, sizeof(tmp_actual_angle)); if (raw_actual_angle != 0) { target = 
stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 2, this->_internal_actual_angle(), target - ); + target = ::_pbi::WireFormatLite::WriteDoubleToArray(2, this->_internal_actual_angle(), target); } // bool act_success = 3; if (this->_internal_act_success() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteBoolToArray( - 3, this->_internal_act_success(), target - ); + target = ::_pbi::WireFormatLite::WriteBoolToArray(3, this->_internal_act_success(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -7637,37 +6894,35 @@ namespace protobuf return target; } - ::size_t MoveRes::ByteSizeLong() const + size_t MoveRes::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MoveRes) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 actual_speed = 1; if (this->_internal_actual_speed() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_actual_speed() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_actual_speed()); } // double actual_angle = 2; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_actual_angle = this->_internal_actual_angle(); - ::uint64_t raw_actual_angle; + uint64_t raw_actual_angle; memcpy(&raw_actual_angle, &tmp_actual_angle, sizeof(tmp_actual_angle)); if (raw_actual_angle != 0) { - total_size += 9; + total_size += 1 + 8; } // bool act_success = 3; if (this->_internal_act_success() != 0) { - total_size += 2; + total_size += 1 + 1; } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -7686,17 +6941,17 
@@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MoveRes) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_actual_speed() != 0) { _this->_internal_set_actual_speed(from._internal_actual_speed()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_actual_angle = from._internal_actual_angle(); - ::uint64_t raw_actual_angle; + uint64_t raw_actual_angle; memcpy(&raw_actual_angle, &tmp_actual_angle, sizeof(tmp_actual_angle)); if (raw_actual_angle != 0) { @@ -7740,6 +6995,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[15] ); } + // =================================================================== class BoolRes::_Internal @@ -7747,30 +7003,33 @@ namespace protobuf public: }; - BoolRes::BoolRes(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + BoolRes::BoolRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.BoolRes) } BoolRes::BoolRes(const BoolRes& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + BoolRes* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.act_success_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + _this->_impl_.act_success_ = from._impl_.act_success_; // @@protoc_insertion_point(copy_constructor:protobuf.BoolRes) } - inline void BoolRes::SharedCtor(::_pb::Arena* arena) + inline void BoolRes::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.act_success_){false} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.act_success_){false}, /*decltype(_impl_._cached_size_)*/ {}}; } BoolRes::~BoolRes() @@ -7786,7 +7045,7 @@ namespace protobuf inline void BoolRes::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void BoolRes::SetCachedSize(int size) const @@ -7797,7 +7056,7 @@ namespace protobuf void BoolRes::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.BoolRes) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; @@ -7812,21 +7071,19 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // bool act_success = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.act_success_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -7854,21 +7111,19 @@ namespace protobuf #undef CHK_ } - ::uint8_t* BoolRes::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* BoolRes::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.BoolRes) - ::uint32_t 
cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // bool act_success = 1; if (this->_internal_act_success() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteBoolToArray( - 1, this->_internal_act_success(), target - ); + target = ::_pbi::WireFormatLite::WriteBoolToArray(1, this->_internal_act_success(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -7881,19 +7136,19 @@ namespace protobuf return target; } - ::size_t BoolRes::ByteSizeLong() const + size_t BoolRes::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.BoolRes) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // bool act_success = 1; if (this->_internal_act_success() != 0) { - total_size += 2; + total_size += 1 + 1; } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -7912,8 +7167,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.BoolRes) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_act_success() != 0) @@ -7941,7 +7196,6 @@ namespace protobuf { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - swap(_impl_.act_success_, other->_impl_.act_success_); } @@ -7951,6 +7205,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[16] ); } + // =================================================================== class ShipInfoRes::_Internal @@ -7958,10 +7213,10 @@ namespace protobuf public: }; - 
ShipInfoRes::ShipInfoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + ShipInfoRes::ShipInfoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.ShipInfoRes) } ShipInfoRes::ShipInfoRes(const ShipInfoRes& from) : @@ -7976,9 +7231,12 @@ namespace protobuf // @@protoc_insertion_point(copy_constructor:protobuf.ShipInfoRes) } - inline void ShipInfoRes::SharedCtor(::_pb::Arena* arena) + inline void ShipInfoRes::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ decltype(_impl_.ship_info_){arena}, /*decltype(_impl_._cached_size_)*/ {}}; } @@ -7996,8 +7254,8 @@ namespace protobuf inline void ShipInfoRes::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); - _internal_mutable_ship_info()->~RepeatedPtrField(); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); + _impl_.ship_info_.~RepeatedPtrField(); } void ShipInfoRes::SetCachedSize(int size) const @@ -8008,11 +7266,11 @@ namespace protobuf void ShipInfoRes::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.ShipInfoRes) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - _internal_mutable_ship_info()->Clear(); + _impl_.ship_info_.Clear(); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -8023,13 +7281,13 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // repeated .protobuf.MessageOfShip ship_info = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 10)) { ptr -= 1; do @@ -8042,9 +7300,7 @@ 
namespace protobuf } while (::PROTOBUF_NAMESPACE_ID::internal::ExpectTag<10>(ptr)); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -8072,12 +7328,12 @@ namespace protobuf #undef CHK_ } - ::uint8_t* ShipInfoRes::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* ShipInfoRes::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.ShipInfoRes) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // repeated .protobuf.MessageOfShip ship_info = 1; @@ -8101,18 +7357,18 @@ namespace protobuf return target; } - ::size_t ShipInfoRes::ByteSizeLong() const + size_t ShipInfoRes::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.ShipInfoRes) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // repeated .protobuf.MessageOfShip ship_info = 1; total_size += 1UL * this->_internal_ship_info_size(); - for (const auto& msg : this->_internal_ship_info()) + for (const auto& msg : this->_impl_.ship_info_) { total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(msg); @@ -8134,11 +7390,11 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.ShipInfoRes) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; - _this->_internal_mutable_ship_info()->MergeFrom(from._internal_ship_info()); + _this->_impl_.ship_info_.MergeFrom(from._impl_.ship_info_); 
_this->_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); } @@ -8160,7 +7416,7 @@ namespace protobuf { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - _internal_mutable_ship_info()->InternalSwap(other->_internal_mutable_ship_info()); + _impl_.ship_info_.InternalSwap(&other->_impl_.ship_info_); } ::PROTOBUF_NAMESPACE_ID::Metadata ShipInfoRes::GetMetadata() const @@ -8169,6 +7425,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[17] ); } + // =================================================================== class EcoRes::_Internal @@ -8176,30 +7433,33 @@ namespace protobuf public: }; - EcoRes::EcoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + EcoRes::EcoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.EcoRes) } EcoRes::EcoRes(const EcoRes& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + EcoRes* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.economy_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + _this->_impl_.economy_ = from._impl_.economy_; // @@protoc_insertion_point(copy_constructor:protobuf.EcoRes) } - inline void EcoRes::SharedCtor(::_pb::Arena* arena) + inline void EcoRes::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.economy_){::int64_t{0}} - - 
, - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.economy_){int64_t{0}}, /*decltype(_impl_._cached_size_)*/ {}}; } EcoRes::~EcoRes() @@ -8215,7 +7475,7 @@ namespace protobuf inline void EcoRes::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void EcoRes::SetCachedSize(int size) const @@ -8226,11 +7486,11 @@ namespace protobuf void EcoRes::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.EcoRes) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - _impl_.economy_ = ::int64_t{0}; + _impl_.economy_ = int64_t{0}; _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -8241,21 +7501,19 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 economy = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.economy_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -8283,21 +7541,19 @@ namespace protobuf #undef CHK_ } - ::uint8_t* EcoRes::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* EcoRes::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.EcoRes) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 economy = 1; if (this->_internal_economy() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_economy(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, 
this->_internal_economy(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -8310,21 +7566,19 @@ namespace protobuf return target; } - ::size_t EcoRes::ByteSizeLong() const + size_t EcoRes::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.EcoRes) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 economy = 1; if (this->_internal_economy() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_economy() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_economy()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -8343,8 +7597,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.EcoRes) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_economy() != 0) @@ -8372,7 +7626,6 @@ namespace protobuf { using std::swap; _internal_metadata_.InternalSwap(&other->_internal_metadata_); - swap(_impl_.economy_, other->_impl_.economy_); } @@ -8382,19 +7635,18 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[18] ); } + // =================================================================== class MessageOfNews::_Internal { public: - static constexpr ::int32_t kOneofCaseOffset = - PROTOBUF_FIELD_OFFSET(::protobuf::MessageOfNews, _impl_._oneof_case_); }; - MessageOfNews::MessageOfNews(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + 
MessageOfNews::MessageOfNews(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MessageOfNews) } MessageOfNews::MessageOfNews(const MessageOfNews& from) : @@ -8403,18 +7655,10 @@ namespace protobuf MessageOfNews* const _this = this; (void)_this; new (&_impl_) Impl_{ - decltype(_impl_.from_id_){} - - , - decltype(_impl_.to_id_){} - - , - decltype(_impl_.news_){}, - /*decltype(_impl_._cached_size_)*/ {}, - /*decltype(_impl_._oneof_case_)*/ {}}; + decltype(_impl_.from_id_){}, decltype(_impl_.to_id_){}, decltype(_impl_.news_){}, /*decltype(_impl_._cached_size_)*/ {}, /*decltype(_impl_._oneof_case_)*/ {}}; _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - ::memcpy(&_impl_.from_id_, &from._impl_.from_id_, static_cast<::size_t>(reinterpret_cast(&_impl_.to_id_) - reinterpret_cast(&_impl_.from_id_)) + sizeof(_impl_.to_id_)); + ::memcpy(&_impl_.from_id_, &from._impl_.from_id_, static_cast(reinterpret_cast(&_impl_.to_id_) - reinterpret_cast(&_impl_.from_id_)) + sizeof(_impl_.to_id_)); clear_has_news(); switch (from.news_case()) { @@ -8436,19 +7680,14 @@ namespace protobuf // @@protoc_insertion_point(copy_constructor:protobuf.MessageOfNews) } - inline void MessageOfNews::SharedCtor(::_pb::Arena* arena) + inline void MessageOfNews::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.from_id_){::int64_t{0}} - - , - decltype(_impl_.to_id_){::int64_t{0}} - - , - decltype(_impl_.news_){}, - /*decltype(_impl_._cached_size_)*/ {}, - /*decltype(_impl_._oneof_case_)*/ {}}; + decltype(_impl_.from_id_){int64_t{0}}, decltype(_impl_.to_id_){int64_t{0}}, decltype(_impl_.news_){}, /*decltype(_impl_._cached_size_)*/ {}, /*decltype(_impl_._oneof_case_)*/ {}}; 
clear_has_news(); } @@ -8465,7 +7704,7 @@ namespace protobuf inline void MessageOfNews::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); if (has_news()) { clear_news(); @@ -8503,11 +7742,11 @@ namespace protobuf void MessageOfNews::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MessageOfNews) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.from_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.to_id_) - reinterpret_cast(&_impl_.from_id_)) + sizeof(_impl_.to_id_)); + ::memset(&_impl_.from_id_, 0, static_cast(reinterpret_cast(&_impl_.to_id_) - reinterpret_cast(&_impl_.from_id_)) + sizeof(_impl_.to_id_)); clear_news(); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -8519,13 +7758,13 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // string text_message = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 10)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 10)) { auto str = _internal_mutable_text_message(); ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx); @@ -8533,46 +7772,38 @@ namespace protobuf CHK_(::_pbi::VerifyUTF8(str, "protobuf.MessageOfNews.text_message")); } else - { goto handle_unusual; - } continue; // int64 from_id = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.from_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 to_id = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.to_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); 
CHK_(ptr); } else - { goto handle_unusual; - } continue; // bytes binary_message = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 34)) { auto str = _internal_mutable_binary_message(); ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -8600,47 +7831,45 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MessageOfNews::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MessageOfNews::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MessageOfNews) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // string text_message = 1; - if (news_case() == kTextMessage) + if (_internal_has_text_message()) { - const std::string& _s = this->_internal_text_message(); ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - _s.data(), static_cast(_s.length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, "protobuf.MessageOfNews.text_message" + this->_internal_text_message().data(), static_cast(this->_internal_text_message().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, "protobuf.MessageOfNews.text_message" + ); + target = stream->WriteStringMaybeAliased( + 1, this->_internal_text_message(), target ); - target = stream->WriteStringMaybeAliased(1, _s, target); } // int64 from_id = 2; if (this->_internal_from_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 2, this->_internal_from_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_from_id(), target); } // int64 to_id = 3; if (this->_internal_to_id() != 0) { target = stream->EnsureSpace(target); - target = 
::_pbi::WireFormatLite::WriteInt64ToArray( - 3, this->_internal_to_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_to_id(), target); } // bytes binary_message = 4; - if (news_case() == kBinaryMessage) + if (_internal_has_binary_message()) { - const std::string& _s = this->_internal_binary_message(); - target = stream->WriteBytesMaybeAliased(4, _s, target); + target = stream->WriteBytesMaybeAliased( + 4, this->_internal_binary_message(), target + ); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -8653,29 +7882,25 @@ namespace protobuf return target; } - ::size_t MessageOfNews::ByteSizeLong() const + size_t MessageOfNews::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MessageOfNews) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 from_id = 2; if (this->_internal_from_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_from_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_from_id()); } // int64 to_id = 3; if (this->_internal_to_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_to_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_to_id()); } switch (news_case()) @@ -8683,17 +7908,19 @@ namespace protobuf // string text_message = 1; case kTextMessage: { - total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_text_message() - ); + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_text_message() + ); break; } // bytes binary_message = 4; case kBinaryMessage: { - total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - 
this->_internal_binary_message() - ); + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + this->_internal_binary_message() + ); break; } case NEWS_NOT_SET: @@ -8717,8 +7944,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MessageOfNews) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_from_id() != 0) @@ -8782,6 +8009,7 @@ namespace protobuf &descriptor_table_Message2Clients_2eproto_getter, &descriptor_table_Message2Clients_2eproto_once, file_level_metadata_Message2Clients_2eproto[19] ); } + // @@protoc_insertion_point(namespace_scope) } // namespace protobuf PROTOBUF_NAMESPACE_OPEN @@ -8906,5 +8134,6 @@ PROTOBUF_NOINLINE ::protobuf::MessageOfNews* return Arena::CreateMessageInternal<::protobuf::MessageOfNews>(arena); } PROTOBUF_NAMESPACE_CLOSE + // @@protoc_insertion_point(global_scope) -#include "google/protobuf/port_undef.inc" +#include diff --git a/CAPI/cpp/proto/Message2Clients.pb.h b/CAPI/cpp/proto/Message2Clients.pb.h index 6cd959a8..e78f79b7 100644 --- a/CAPI/cpp/proto/Message2Clients.pb.h +++ b/CAPI/cpp/proto/Message2Clients.pb.h @@ -1,44 +1,39 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: Message2Clients.proto -#ifndef GOOGLE_PROTOBUF_INCLUDED_Message2Clients_2eproto_2epb_2eh -#define GOOGLE_PROTOBUF_INCLUDED_Message2Clients_2eproto_2epb_2eh +#ifndef GOOGLE_PROTOBUF_INCLUDED_Message2Clients_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_Message2Clients_2eproto #include #include -#include - -#include "google/protobuf/port_def.inc" -#if PROTOBUF_VERSION < 4023000 -#error "This file was generated by a newer version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please update" -#error "your headers." 
-#endif // PROTOBUF_VERSION - -#if 4023004 < PROTOBUF_MIN_PROTOC_VERSION -#error "This file was generated by an older version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please" -#error "regenerate this file with a newer version of protoc." -#endif // PROTOBUF_MIN_PROTOC_VERSION -#include "google/protobuf/port_undef.inc" -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/arena.h" -#include "google/protobuf/arenastring.h" -#include "google/protobuf/generated_message_util.h" -#include "google/protobuf/metadata_lite.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/message.h" -#include "google/protobuf/repeated_field.h" // IWYU pragma: export -#include "google/protobuf/extension_set.h" // IWYU pragma: export -#include "google/protobuf/unknown_field_set.h" + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021006 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include #include "MessageType.pb.h" // @@protoc_insertion_point(includes) - -// Must be included last. -#include "google/protobuf/port_def.inc" - +#include #define PROTOBUF_INTERNAL_EXPORT_Message2Clients_2eproto - PROTOBUF_NAMESPACE_OPEN namespace internal { @@ -49,10 +44,9 @@ PROTOBUF_NAMESPACE_CLOSE // Internal implementation detail -- do not use these members. 
struct TableStruct_Message2Clients_2eproto { - static const ::uint32_t offsets[]; + static const uint32_t offsets[]; }; -extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable - descriptor_table_Message2Clients_2eproto; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_Message2Clients_2eproto; namespace protobuf { class BoolRes; @@ -158,14 +152,11 @@ ::protobuf::MoveRes* Arena::CreateMaybeMessage<::protobuf::MoveRes>(Arena*); template<> ::protobuf::ShipInfoRes* Arena::CreateMaybeMessage<::protobuf::ShipInfoRes>(Arena*); PROTOBUF_NAMESPACE_CLOSE - namespace protobuf { // =================================================================== - // ------------------------------------------------------------------- - class MessageOfShip final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfShip) */ { @@ -175,7 +166,6 @@ namespace protobuf { } ~MessageOfShip() override; - template explicit PROTOBUF_CONSTEXPR MessageOfShip(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfShip(const MessageOfShip& from); @@ -210,15 +200,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -271,7 +252,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -296,10 +277,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool 
IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -307,20 +288,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfShip* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfShip"; } protected: - explicit MessageOfShip(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfShip(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -355,92 +336,92 @@ namespace protobuf }; // int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int32 speed = 3; void clear_speed(); - ::int32_t speed() const; - void set_speed(::int32_t value); + int32_t speed() const; + void set_speed(int32_t 
value); private: - ::int32_t _internal_speed() const; - void _internal_set_speed(::int32_t value); + int32_t _internal_speed() const; + void _internal_set_speed(int32_t value); public: // int32 hp = 4; void clear_hp(); - ::int32_t hp() const; - void set_hp(::int32_t value); + int32_t hp() const; + void set_hp(int32_t value); private: - ::int32_t _internal_hp() const; - void _internal_set_hp(::int32_t value); + int32_t _internal_hp() const; + void _internal_set_hp(int32_t value); public: // int32 armor = 5; void clear_armor(); - ::int32_t armor() const; - void set_armor(::int32_t value); + int32_t armor() const; + void set_armor(int32_t value); private: - ::int32_t _internal_armor() const; - void _internal_set_armor(::int32_t value); + int32_t _internal_armor() const; + void _internal_set_armor(int32_t value); public: // int32 shield = 6; void clear_shield(); - ::int32_t shield() const; - void set_shield(::int32_t value); + int32_t shield() const; + void set_shield(int32_t value); private: - ::int32_t _internal_shield() const; - void _internal_set_shield(::int32_t value); + int32_t _internal_shield() const; + void _internal_set_shield(int32_t value); public: // int64 team_id = 7; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // int64 player_id = 8; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void _internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int64 guid = 9; void clear_guid(); - ::int64_t guid() const; - void set_guid(::int64_t 
value); + int64_t guid() const; + void set_guid(int64_t value); private: - ::int64_t _internal_guid() const; - void _internal_set_guid(::int64_t value); + int64_t _internal_guid() const; + void _internal_set_guid(int64_t value); public: // .protobuf.ShipState ship_state = 10; @@ -465,12 +446,12 @@ namespace protobuf public: // int32 view_range = 12; void clear_view_range(); - ::int32_t view_range() const; - void set_view_range(::int32_t value); + int32_t view_range() const; + void set_view_range(int32_t value); private: - ::int32_t _internal_view_range() const; - void _internal_set_view_range(::int32_t value); + int32_t _internal_view_range() const; + void _internal_set_view_range(int32_t value); public: // .protobuf.ProducerType producer_type = 13; @@ -544,18 +525,18 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int32_t speed_; - ::int32_t hp_; - ::int32_t armor_; - ::int32_t shield_; - ::int64_t team_id_; - ::int64_t player_id_; - ::int64_t guid_; + int32_t x_; + int32_t y_; + int32_t speed_; + int32_t hp_; + int32_t armor_; + int32_t shield_; + int64_t team_id_; + int64_t player_id_; + int64_t guid_; int ship_state_; int ship_type_; - ::int32_t view_range_; + int32_t view_range_; int producer_type_; int constructor_type_; int armor_type_; @@ -569,7 +550,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfBullet final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfBullet) */ @@ -580,7 +562,6 @@ namespace protobuf { } ~MessageOfBullet() override; - template explicit PROTOBUF_CONSTEXPR MessageOfBullet(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfBullet(const MessageOfBullet& from); @@ -615,15 +596,6 @@ namespace 
protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -676,7 +648,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -701,10 +673,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -712,20 +684,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfBullet* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfBullet"; } protected: - explicit MessageOfBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool 
is_message_owned = false); public: static const ClassData _class_data_; @@ -761,12 +733,12 @@ namespace protobuf public: // int32 x = 2; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // double facing_direction = 4; @@ -781,42 +753,42 @@ namespace protobuf public: // int32 y = 3; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int32 damage = 5; void clear_damage(); - ::int32_t damage() const; - void set_damage(::int32_t value); + int32_t damage() const; + void set_damage(int32_t value); private: - ::int32_t _internal_damage() const; - void _internal_set_damage(::int32_t value); + int32_t _internal_damage() const; + void _internal_set_damage(int32_t value); public: // int64 team_id = 6; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // int64 guid = 7; void clear_guid(); - ::int64_t guid() const; - void set_guid(::int64_t value); + int64_t guid() const; + void set_guid(int64_t value); private: - ::int64_t _internal_guid() const; - void _internal_set_guid(::int64_t value); + int64_t _internal_guid() const; + void _internal_set_guid(int64_t value); public: // double bomb_range = 8; @@ -831,12 +803,12 @@ namespace protobuf public: // int32 speed = 9; void clear_speed(); - ::int32_t speed() const; - void set_speed(::int32_t 
value); + int32_t speed() const; + void set_speed(int32_t value); private: - ::int32_t _internal_speed() const; - void _internal_set_speed(::int32_t value); + int32_t _internal_speed() const; + void _internal_set_speed(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfBullet) @@ -851,14 +823,14 @@ namespace protobuf struct Impl_ { int type_; - ::int32_t x_; + int32_t x_; double facing_direction_; - ::int32_t y_; - ::int32_t damage_; - ::int64_t team_id_; - ::int64_t guid_; + int32_t y_; + int32_t damage_; + int64_t team_id_; + int64_t guid_; double bomb_range_; - ::int32_t speed_; + int32_t speed_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -866,7 +838,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfBombedBullet final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfBombedBullet) */ @@ -877,7 +850,6 @@ namespace protobuf { } ~MessageOfBombedBullet() override; - template explicit PROTOBUF_CONSTEXPR MessageOfBombedBullet(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfBombedBullet(const MessageOfBombedBullet& from); @@ -912,15 +884,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -973,7 +936,7 @@ namespace protobuf { if 
(other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -998,10 +961,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1009,20 +972,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfBombedBullet* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfBombedBullet"; } protected: - explicit MessageOfBombedBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfBombedBullet(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1055,12 +1018,12 @@ namespace protobuf public: // int32 x = 2; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // double facing_direction = 4; @@ -1075,12 +1038,12 @@ namespace protobuf public: // int64 mapping_id = 5; void clear_mapping_id(); - ::int64_t 
mapping_id() const; - void set_mapping_id(::int64_t value); + int64_t mapping_id() const; + void set_mapping_id(int64_t value); private: - ::int64_t _internal_mapping_id() const; - void _internal_set_mapping_id(::int64_t value); + int64_t _internal_mapping_id() const; + void _internal_set_mapping_id(int64_t value); public: // double bomb_range = 6; @@ -1095,12 +1058,12 @@ namespace protobuf public: // int32 y = 3; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfBombedBullet) @@ -1115,11 +1078,11 @@ namespace protobuf struct Impl_ { int type_; - ::int32_t x_; + int32_t x_; double facing_direction_; - ::int64_t mapping_id_; + int64_t mapping_id_; double bomb_range_; - ::int32_t y_; + int32_t y_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -1127,7 +1090,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfFactory final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfFactory) */ @@ -1138,7 +1102,6 @@ namespace protobuf { } ~MessageOfFactory() override; - template explicit PROTOBUF_CONSTEXPR MessageOfFactory(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfFactory(const MessageOfFactory& from); @@ -1173,15 +1136,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return 
_internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -1234,7 +1188,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1259,10 +1213,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1270,20 +1224,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfFactory* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfFactory"; } protected: - explicit MessageOfFactory(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfFactory(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1304,42 +1258,42 @@ namespace protobuf }; // 
int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int64 team_id = 4; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // int32 hp = 3; void clear_hp(); - ::int32_t hp() const; - void set_hp(::int32_t value); + int32_t hp() const; + void set_hp(int32_t value); private: - ::int32_t _internal_hp() const; - void _internal_set_hp(::int32_t value); + int32_t _internal_hp() const; + void _internal_set_hp(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfFactory) @@ -1353,10 +1307,10 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int64_t team_id_; - ::int32_t hp_; + int32_t x_; + int32_t y_; + int64_t team_id_; + int32_t hp_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -1364,7 +1318,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfCommunity final : public ::PROTOBUF_NAMESPACE_ID::Message /* 
@@protoc_insertion_point(class_definition:protobuf.MessageOfCommunity) */ @@ -1375,7 +1330,6 @@ namespace protobuf { } ~MessageOfCommunity() override; - template explicit PROTOBUF_CONSTEXPR MessageOfCommunity(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfCommunity(const MessageOfCommunity& from); @@ -1410,15 +1364,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -1471,7 +1416,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1496,10 +1441,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1507,20 +1452,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfCommunity* other); private: friend class 
::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfCommunity"; } protected: - explicit MessageOfCommunity(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfCommunity(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1541,42 +1486,42 @@ namespace protobuf }; // int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int64 team_id = 4; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // int32 hp = 3; void clear_hp(); - ::int32_t hp() const; - void set_hp(::int32_t value); + int32_t hp() const; + void set_hp(int32_t value); private: - ::int32_t _internal_hp() const; - void _internal_set_hp(::int32_t value); + int32_t _internal_hp() const; + void _internal_set_hp(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfCommunity) @@ -1590,10 +1535,10 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int64_t team_id_; - ::int32_t hp_; + int32_t x_; + int32_t y_; + 
int64_t team_id_; + int32_t hp_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -1601,7 +1546,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfFort final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfFort) */ @@ -1612,7 +1558,6 @@ namespace protobuf { } ~MessageOfFort() override; - template explicit PROTOBUF_CONSTEXPR MessageOfFort(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfFort(const MessageOfFort& from); @@ -1647,15 +1592,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -1708,7 +1644,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1733,10 +1669,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, 
::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1744,20 +1680,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfFort* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfFort"; } protected: - explicit MessageOfFort(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfFort(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1778,42 +1714,42 @@ namespace protobuf }; // int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int64 team_id = 4; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // int32 hp = 3; void clear_hp(); - ::int32_t hp() const; - void set_hp(::int32_t value); + int32_t hp() const; + void set_hp(int32_t value); private: - ::int32_t 
_internal_hp() const; - void _internal_set_hp(::int32_t value); + int32_t _internal_hp() const; + void _internal_set_hp(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfFort) @@ -1827,10 +1763,10 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int64_t team_id_; - ::int32_t hp_; + int32_t x_; + int32_t y_; + int64_t team_id_; + int32_t hp_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -1838,7 +1774,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfWormhole final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfWormhole) */ @@ -1849,7 +1786,6 @@ namespace protobuf { } ~MessageOfWormhole() override; - template explicit PROTOBUF_CONSTEXPR MessageOfWormhole(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfWormhole(const MessageOfWormhole& from); @@ -1884,15 +1820,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -1945,7 +1872,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1970,10 +1897,10 @@ namespace protobuf 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1981,20 +1908,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfWormhole* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfWormhole"; } protected: - explicit MessageOfWormhole(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfWormhole(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -2014,32 +1941,32 @@ namespace protobuf }; // int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int32 hp = 3; void clear_hp(); - ::int32_t hp() const; - void 
set_hp(::int32_t value); + int32_t hp() const; + void set_hp(int32_t value); private: - ::int32_t _internal_hp() const; - void _internal_set_hp(::int32_t value); + int32_t _internal_hp() const; + void _internal_set_hp(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfWormhole) @@ -2053,9 +1980,9 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int32_t hp_; + int32_t x_; + int32_t y_; + int32_t hp_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -2063,7 +1990,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfResource final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfResource) */ @@ -2074,7 +2002,6 @@ namespace protobuf { } ~MessageOfResource() override; - template explicit PROTOBUF_CONSTEXPR MessageOfResource(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfResource(const MessageOfResource& from); @@ -2109,15 +2036,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -2170,7 +2088,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); 
InternalSwap(other); } @@ -2195,10 +2113,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -2206,20 +2124,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfResource* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfResource"; } protected: - explicit MessageOfResource(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfResource(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -2239,32 +2157,32 @@ namespace protobuf }; // int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int32 
progress = 3; void clear_progress(); - ::int32_t progress() const; - void set_progress(::int32_t value); + int32_t progress() const; + void set_progress(int32_t value); private: - ::int32_t _internal_progress() const; - void _internal_set_progress(::int32_t value); + int32_t _internal_progress() const; + void _internal_set_progress(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfResource) @@ -2278,9 +2196,9 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int32_t progress_; + int32_t x_; + int32_t y_; + int32_t progress_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -2288,7 +2206,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfHome final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfHome) */ @@ -2299,7 +2218,6 @@ namespace protobuf { } ~MessageOfHome() override; - template explicit PROTOBUF_CONSTEXPR MessageOfHome(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfHome(const MessageOfHome& from); @@ -2334,15 +2252,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -2395,7 +2304,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == 
other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -2420,10 +2329,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -2431,20 +2340,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfHome* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfHome"; } protected: - explicit MessageOfHome(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfHome(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -2465,42 +2374,42 @@ namespace protobuf }; // int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t 
_internal_y() const; + void _internal_set_y(int32_t value); public: // int64 team_id = 4; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // int32 hp = 3; void clear_hp(); - ::int32_t hp() const; - void set_hp(::int32_t value); + int32_t hp() const; + void set_hp(int32_t value); private: - ::int32_t _internal_hp() const; - void _internal_set_hp(::int32_t value); + int32_t _internal_hp() const; + void _internal_set_hp(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfHome) @@ -2514,10 +2423,10 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int64_t team_id_; - ::int32_t hp_; + int32_t x_; + int32_t y_; + int64_t team_id_; + int32_t hp_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -2525,7 +2434,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfMap_Row final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfMap.Row) */ @@ -2536,7 +2446,6 @@ namespace protobuf { } ~MessageOfMap_Row() override; - template explicit PROTOBUF_CONSTEXPR MessageOfMap_Row(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfMap_Row(const MessageOfMap_Row& from); @@ -2571,15 +2480,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return 
_internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -2632,7 +2532,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -2657,10 +2557,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -2668,20 +2568,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfMap_Row* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfMap.Row"; } protected: - explicit MessageOfMap_Row(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfMap_Row(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -2706,6 +2606,11 @@ namespace protobuf public: 
void clear_cols(); + private: + ::protobuf::PlaceType _internal_cols(int index) const; + void _internal_add_cols(::protobuf::PlaceType value); + ::PROTOBUF_NAMESPACE_ID::RepeatedField* _internal_mutable_cols(); + public: ::protobuf::PlaceType cols(int index) const; void set_cols(int index, ::protobuf::PlaceType value); @@ -2713,13 +2618,6 @@ namespace protobuf const ::PROTOBUF_NAMESPACE_ID::RepeatedField& cols() const; ::PROTOBUF_NAMESPACE_ID::RepeatedField* mutable_cols(); - private: - ::protobuf::PlaceType _internal_cols(int index) const; - void _internal_add_cols(::protobuf::PlaceType value); - const ::PROTOBUF_NAMESPACE_ID::RepeatedField& _internal_cols() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedField* _internal_mutable_cols(); - - public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfMap.Row) private: @@ -2732,7 +2630,7 @@ namespace protobuf struct Impl_ { ::PROTOBUF_NAMESPACE_ID::RepeatedField cols_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cols_cached_byte_size_; + mutable std::atomic _cols_cached_byte_size_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -2740,7 +2638,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfMap final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfMap) */ @@ -2751,7 +2650,6 @@ namespace protobuf { } ~MessageOfMap() override; - template explicit PROTOBUF_CONSTEXPR MessageOfMap(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfMap(const MessageOfMap& from); @@ -2786,15 +2684,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return 
_internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -2847,7 +2736,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -2872,10 +2761,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -2883,20 +2772,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfMap* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfMap"; } protected: - explicit MessageOfMap(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfMap(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -2931,32 +2820,31 @@ namespace protobuf private: const 
::protobuf::MessageOfMap_Row& _internal_rows(int index) const; ::protobuf::MessageOfMap_Row* _internal_add_rows(); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfMap_Row>& _internal_rows() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfMap_Row>* _internal_mutable_rows(); public: const ::protobuf::MessageOfMap_Row& rows(int index) const; ::protobuf::MessageOfMap_Row* add_rows(); const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfMap_Row>& rows() const; + // uint32 height = 1; void clear_height(); - ::uint32_t height() const; - void set_height(::uint32_t value); + uint32_t height() const; + void set_height(uint32_t value); private: - ::uint32_t _internal_height() const; - void _internal_set_height(::uint32_t value); + uint32_t _internal_height() const; + void _internal_set_height(uint32_t value); public: // uint32 width = 2; void clear_width(); - ::uint32_t width() const; - void set_width(::uint32_t value); + uint32_t width() const; + void set_width(uint32_t value); private: - ::uint32_t _internal_width() const; - void _internal_set_width(::uint32_t value); + uint32_t _internal_width() const; + void _internal_set_width(uint32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfMap) @@ -2971,8 +2859,8 @@ namespace protobuf struct Impl_ { ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfMap_Row> rows_; - ::uint32_t height_; - ::uint32_t width_; + uint32_t height_; + uint32_t width_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -2980,7 +2868,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfTeam final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfTeam) */ @@ 
-2991,7 +2880,6 @@ namespace protobuf { } ~MessageOfTeam() override; - template explicit PROTOBUF_CONSTEXPR MessageOfTeam(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfTeam(const MessageOfTeam& from); @@ -3026,15 +2914,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -3087,7 +2966,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -3112,10 +2991,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -3123,20 +3002,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfTeam* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static 
::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfTeam"; } protected: - explicit MessageOfTeam(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfTeam(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -3158,52 +3037,52 @@ namespace protobuf }; // int64 team_id = 1; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // int64 player_id = 2; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void _internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int32 score = 3; void clear_score(); - ::int32_t score() const; - void set_score(::int32_t value); + int32_t score() const; + void set_score(int32_t value); private: - ::int32_t _internal_score() const; - void _internal_set_score(::int32_t value); + int32_t _internal_score() const; + void _internal_set_score(int32_t value); public: // int32 money = 4; void clear_money(); - ::int32_t money() const; - void set_money(::int32_t value); + int32_t money() const; + void set_money(int32_t value); private: - ::int32_t _internal_money() const; - void _internal_set_money(::int32_t value); + int32_t _internal_money() const; + void _internal_set_money(int32_t value); public: // int64 guid = 5; void clear_guid(); - ::int64_t guid() const; - void set_guid(::int64_t value); + int64_t guid() const; + void set_guid(int64_t value); private: - ::int64_t _internal_guid() const; - void _internal_set_guid(::int64_t 
value); + int64_t _internal_guid() const; + void _internal_set_guid(int64_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfTeam) @@ -3217,11 +3096,11 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t team_id_; - ::int64_t player_id_; - ::int32_t score_; - ::int32_t money_; - ::int64_t guid_; + int64_t team_id_; + int64_t player_id_; + int32_t score_; + int32_t money_; + int64_t guid_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -3229,7 +3108,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfObj final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfObj) */ @@ -3240,7 +3120,6 @@ namespace protobuf { } ~MessageOfObj() override; - template explicit PROTOBUF_CONSTEXPR MessageOfObj(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfObj(const MessageOfObj& from); @@ -3275,15 +3154,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -3353,7 +3223,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -3378,10 +3248,10 @@ namespace protobuf 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -3389,20 +3259,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfObj* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfObj"; } protected: - explicit MessageOfObj(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfObj(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -3451,6 +3321,7 @@ namespace protobuf ::protobuf::MessageOfShip* ship_message ); ::protobuf::MessageOfShip* unsafe_arena_release_ship_message(); + // .protobuf.MessageOfBullet bullet_message = 2; bool has_bullet_message() const; @@ -3473,6 +3344,7 @@ namespace protobuf ::protobuf::MessageOfBullet* bullet_message ); ::protobuf::MessageOfBullet* unsafe_arena_release_bullet_message(); + // .protobuf.MessageOfFactory factory_message = 3; bool has_factory_message() const; @@ -3495,6 +3367,7 @@ namespace protobuf ::protobuf::MessageOfFactory* factory_message ); ::protobuf::MessageOfFactory* unsafe_arena_release_factory_message(); + // .protobuf.MessageOfCommunity community_message = 4; bool has_community_message() const; @@ -3517,6 
+3390,7 @@ namespace protobuf ::protobuf::MessageOfCommunity* community_message ); ::protobuf::MessageOfCommunity* unsafe_arena_release_community_message(); + // .protobuf.MessageOfFort fort_message = 5; bool has_fort_message() const; @@ -3539,6 +3413,7 @@ namespace protobuf ::protobuf::MessageOfFort* fort_message ); ::protobuf::MessageOfFort* unsafe_arena_release_fort_message(); + // .protobuf.MessageOfWormhole wormhole_message = 6; bool has_wormhole_message() const; @@ -3561,6 +3436,7 @@ namespace protobuf ::protobuf::MessageOfWormhole* wormhole_message ); ::protobuf::MessageOfWormhole* unsafe_arena_release_wormhole_message(); + // .protobuf.MessageOfHome home_message = 7; bool has_home_message() const; @@ -3583,6 +3459,7 @@ namespace protobuf ::protobuf::MessageOfHome* home_message ); ::protobuf::MessageOfHome* unsafe_arena_release_home_message(); + // .protobuf.MessageOfResource resource_message = 8; bool has_resource_message() const; @@ -3605,6 +3482,7 @@ namespace protobuf ::protobuf::MessageOfResource* resource_message ); ::protobuf::MessageOfResource* unsafe_arena_release_resource_message(); + // .protobuf.MessageOfMap map_message = 9; bool has_map_message() const; @@ -3627,6 +3505,7 @@ namespace protobuf ::protobuf::MessageOfMap* map_message ); ::protobuf::MessageOfMap* unsafe_arena_release_map_message(); + // .protobuf.MessageOfNews news_message = 10; bool has_news_message() const; @@ -3649,6 +3528,7 @@ namespace protobuf ::protobuf::MessageOfNews* news_message ); ::protobuf::MessageOfNews* unsafe_arena_release_news_message(); + // .protobuf.MessageOfBombedBullet bombed_bullet_message = 11; bool has_bombed_bullet_message() const; @@ -3671,6 +3551,7 @@ namespace protobuf ::protobuf::MessageOfBombedBullet* bombed_bullet_message ); ::protobuf::MessageOfBombedBullet* unsafe_arena_release_bombed_bullet_message(); + // .protobuf.MessageOfTeam team_message = 12; bool has_team_message() const; @@ -3693,6 +3574,7 @@ namespace protobuf ::protobuf::MessageOfTeam* 
team_message ); ::protobuf::MessageOfTeam* unsafe_arena_release_team_message(); + void clear_message_of_obj(); MessageOfObjCase message_of_obj_case() const; // @@protoc_insertion_point(class_scope:protobuf.MessageOfObj) @@ -3742,14 +3624,15 @@ namespace protobuf ::protobuf::MessageOfTeam* team_message_; } message_of_obj_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - ::uint32_t _oneof_case_[1]; + uint32_t _oneof_case_[1]; }; union { Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfAll final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfAll) */ @@ -3760,7 +3643,6 @@ namespace protobuf { } ~MessageOfAll() override; - template explicit PROTOBUF_CONSTEXPR MessageOfAll(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfAll(const MessageOfAll& from); @@ -3795,15 +3677,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -3856,7 +3729,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -3881,10 +3754,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + 
size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -3892,20 +3765,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfAll* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfAll"; } protected: - explicit MessageOfAll(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfAll(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -3922,35 +3795,79 @@ namespace protobuf kGameTimeFieldNumber = 1, kRedTeamScoreFieldNumber = 2, kBlueTeamScoreFieldNumber = 3, + kRedTeamMoneyFieldNumber = 4, + kBlueTeamMoneyFieldNumber = 5, + kRedHomeHpFieldNumber = 6, + kBlueHomeHpFieldNumber = 7, }; // int32 game_time = 1; void clear_game_time(); - ::int32_t game_time() const; - void set_game_time(::int32_t value); + int32_t game_time() const; + void set_game_time(int32_t value); private: - ::int32_t _internal_game_time() const; - void _internal_set_game_time(::int32_t value); + int32_t _internal_game_time() const; + void _internal_set_game_time(int32_t value); public: // int32 red_team_score = 2; void clear_red_team_score(); - ::int32_t red_team_score() const; - void set_red_team_score(::int32_t value); + int32_t red_team_score() const; + void set_red_team_score(int32_t value); private: - 
::int32_t _internal_red_team_score() const; - void _internal_set_red_team_score(::int32_t value); + int32_t _internal_red_team_score() const; + void _internal_set_red_team_score(int32_t value); public: // int32 blue_team_score = 3; void clear_blue_team_score(); - ::int32_t blue_team_score() const; - void set_blue_team_score(::int32_t value); + int32_t blue_team_score() const; + void set_blue_team_score(int32_t value); + + private: + int32_t _internal_blue_team_score() const; + void _internal_set_blue_team_score(int32_t value); + + public: + // int32 red_team_money = 4; + void clear_red_team_money(); + int32_t red_team_money() const; + void set_red_team_money(int32_t value); + + private: + int32_t _internal_red_team_money() const; + void _internal_set_red_team_money(int32_t value); + + public: + // int32 blue_team_money = 5; + void clear_blue_team_money(); + int32_t blue_team_money() const; + void set_blue_team_money(int32_t value); + + private: + int32_t _internal_blue_team_money() const; + void _internal_set_blue_team_money(int32_t value); + + public: + // int32 red_home_hp = 6; + void clear_red_home_hp(); + int32_t red_home_hp() const; + void set_red_home_hp(int32_t value); + + private: + int32_t _internal_red_home_hp() const; + void _internal_set_red_home_hp(int32_t value); + + public: + // int32 blue_home_hp = 7; + void clear_blue_home_hp(); + int32_t blue_home_hp() const; + void set_blue_home_hp(int32_t value); private: - ::int32_t _internal_blue_team_score() const; - void _internal_set_blue_team_score(::int32_t value); + int32_t _internal_blue_home_hp() const; + void _internal_set_blue_home_hp(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MessageOfAll) @@ -3964,9 +3881,13 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t game_time_; - ::int32_t red_team_score_; - ::int32_t blue_team_score_; + int32_t game_time_; + int32_t red_team_score_; + int32_t blue_team_score_; + int32_t red_team_money_; + 
int32_t blue_team_money_; + int32_t red_home_hp_; + int32_t blue_home_hp_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -3974,7 +3895,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageToClient final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageToClient) */ @@ -3985,7 +3907,6 @@ namespace protobuf { } ~MessageToClient() override; - template explicit PROTOBUF_CONSTEXPR MessageToClient(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageToClient(const MessageToClient& from); @@ -4020,15 +3941,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -4081,7 +3993,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -4106,10 +4018,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + 
uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -4117,20 +4029,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageToClient* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageToClient"; } protected: - explicit MessageToClient(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageToClient(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -4163,16 +4075,20 @@ namespace protobuf private: const ::protobuf::MessageOfObj& _internal_obj_message(int index) const; ::protobuf::MessageOfObj* _internal_add_obj_message(); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfObj>& _internal_obj_message() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfObj>* _internal_mutable_obj_message(); public: const ::protobuf::MessageOfObj& obj_message(int index) const; ::protobuf::MessageOfObj* add_obj_message(); const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfObj>& obj_message() const; + // .protobuf.MessageOfAll all_message = 3; bool has_all_message() const; + + private: + bool _internal_has_all_message() const; + + public: void clear_all_message(); const ::protobuf::MessageOfAll& all_message() const; PROTOBUF_NODISCARD ::protobuf::MessageOfAll* release_all_message(); @@ -4188,6 +4104,7 @@ namespace protobuf ::protobuf::MessageOfAll* all_message ); ::protobuf::MessageOfAll* unsafe_arena_release_all_message(); + // .protobuf.GameState game_state = 2; void clear_game_state(); 
::protobuf::GameState game_state() const; @@ -4209,18 +4126,18 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::PROTOBUF_NAMESPACE_ID::internal::HasBits<1> _has_bits_; - mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfObj> obj_message_; ::protobuf::MessageOfAll* all_message_; int game_state_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union { Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MoveRes final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MoveRes) */ @@ -4231,7 +4148,6 @@ namespace protobuf { } ~MoveRes() override; - template explicit PROTOBUF_CONSTEXPR MoveRes(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MoveRes(const MoveRes& from); @@ -4266,15 +4182,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -4327,7 +4234,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -4352,10 +4259,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t 
ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -4363,20 +4270,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MoveRes* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MoveRes"; } protected: - explicit MoveRes(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MoveRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -4396,12 +4303,12 @@ namespace protobuf }; // int64 actual_speed = 1; void clear_actual_speed(); - ::int64_t actual_speed() const; - void set_actual_speed(::int64_t value); + int64_t actual_speed() const; + void set_actual_speed(int64_t value); private: - ::int64_t _internal_actual_speed() const; - void _internal_set_actual_speed(::int64_t value); + int64_t _internal_actual_speed() const; + void _internal_set_actual_speed(int64_t value); public: // double actual_angle = 2; @@ -4435,7 +4342,7 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t actual_speed_; + int64_t actual_speed_; double actual_angle_; bool act_success_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; @@ -4445,7 +4352,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // 
------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class BoolRes final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.BoolRes) */ @@ -4456,7 +4364,6 @@ namespace protobuf { } ~BoolRes() override; - template explicit PROTOBUF_CONSTEXPR BoolRes(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); BoolRes(const BoolRes& from); @@ -4491,15 +4398,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -4552,7 +4450,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -4577,10 +4475,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -4588,20 +4486,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* 
arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(BoolRes* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.BoolRes"; } protected: - explicit BoolRes(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit BoolRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -4646,7 +4544,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class ShipInfoRes final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.ShipInfoRes) */ @@ -4657,7 +4556,6 @@ namespace protobuf { } ~ShipInfoRes() override; - template explicit PROTOBUF_CONSTEXPR ShipInfoRes(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); ShipInfoRes(const ShipInfoRes& from); @@ -4692,15 +4590,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -4753,7 +4642,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -4778,10 +4667,10 @@ namespace protobuf 
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -4789,20 +4678,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(ShipInfoRes* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.ShipInfoRes"; } protected: - explicit ShipInfoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit ShipInfoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -4833,14 +4722,13 @@ namespace protobuf private: const ::protobuf::MessageOfShip& _internal_ship_info(int index) const; ::protobuf::MessageOfShip* _internal_add_ship_info(); - const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfShip>& _internal_ship_info() const; - ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfShip>* _internal_mutable_ship_info(); public: const ::protobuf::MessageOfShip& ship_info(int index) const; ::protobuf::MessageOfShip* add_ship_info(); const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfShip>& ship_info() const; + // @@protoc_insertion_point(class_scope:protobuf.ShipInfoRes) private: @@ -4860,7 +4748,8 @@ namespace protobuf Impl_ _impl_; }; friend struct 
::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class EcoRes final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.EcoRes) */ @@ -4871,7 +4760,6 @@ namespace protobuf { } ~EcoRes() override; - template explicit PROTOBUF_CONSTEXPR EcoRes(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); EcoRes(const EcoRes& from); @@ -4906,15 +4794,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -4967,7 +4846,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -4992,10 +4871,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -5003,20 +4882,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void 
SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(EcoRes* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.EcoRes"; } protected: - explicit EcoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit EcoRes(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -5034,12 +4913,12 @@ namespace protobuf }; // int64 economy = 1; void clear_economy(); - ::int64_t economy() const; - void set_economy(::int64_t value); + int64_t economy() const; + void set_economy(int64_t value); private: - ::int64_t _internal_economy() const; - void _internal_set_economy(::int64_t value); + int64_t _internal_economy() const; + void _internal_set_economy(int64_t value); public: // @@protoc_insertion_point(class_scope:protobuf.EcoRes) @@ -5053,7 +4932,7 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t economy_; + int64_t economy_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -5061,7 +4940,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Clients_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MessageOfNews final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MessageOfNews) */ @@ -5072,7 +4952,6 @@ namespace protobuf { } ~MessageOfNews() override; - template explicit PROTOBUF_CONSTEXPR MessageOfNews(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MessageOfNews(const MessageOfNews& from); @@ -5107,15 +4986,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& 
unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -5175,7 +5045,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -5200,10 +5070,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -5211,20 +5081,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MessageOfNews* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.MessageOfNews"; } protected: - explicit MessageOfNews(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MessageOfNews(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -5245,59 +5115,63 @@ 
namespace protobuf }; // int64 from_id = 2; void clear_from_id(); - ::int64_t from_id() const; - void set_from_id(::int64_t value); + int64_t from_id() const; + void set_from_id(int64_t value); private: - ::int64_t _internal_from_id() const; - void _internal_set_from_id(::int64_t value); + int64_t _internal_from_id() const; + void _internal_set_from_id(int64_t value); public: // int64 to_id = 3; void clear_to_id(); - ::int64_t to_id() const; - void set_to_id(::int64_t value); + int64_t to_id() const; + void set_to_id(int64_t value); private: - ::int64_t _internal_to_id() const; - void _internal_set_to_id(::int64_t value); + int64_t _internal_to_id() const; + void _internal_set_to_id(int64_t value); public: // string text_message = 1; bool has_text_message() const; + + private: + bool _internal_has_text_message() const; + + public: void clear_text_message(); const std::string& text_message() const; - - template - void set_text_message(Arg_&& arg, Args_... args); + template + void set_text_message(ArgT0&& arg0, ArgT... args); std::string* mutable_text_message(); PROTOBUF_NODISCARD std::string* release_text_message(); - void set_allocated_text_message(std::string* ptr); + void set_allocated_text_message(std::string* text_message); private: const std::string& _internal_text_message() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_text_message( - const std::string& value - ); + inline PROTOBUF_ALWAYS_INLINE void _internal_set_text_message(const std::string& value); std::string* _internal_mutable_text_message(); public: // bytes binary_message = 4; bool has_binary_message() const; + + private: + bool _internal_has_binary_message() const; + + public: void clear_binary_message(); const std::string& binary_message() const; - - template - void set_binary_message(Arg_&& arg, Args_... args); + template + void set_binary_message(ArgT0&& arg0, ArgT... 
args); std::string* mutable_binary_message(); PROTOBUF_NODISCARD std::string* release_binary_message(); - void set_allocated_binary_message(std::string* ptr); + void set_allocated_binary_message(std::string* binary_message); private: const std::string& _internal_binary_message() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_binary_message( - const std::string& value - ); + inline PROTOBUF_ALWAYS_INLINE void _internal_set_binary_message(const std::string& value); std::string* _internal_mutable_binary_message(); public: @@ -5319,8 +5193,8 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t from_id_; - ::int64_t to_id_; + int64_t from_id_; + int64_t to_id_; union NewsUnion { constexpr NewsUnion() : @@ -5332,7 +5206,7 @@ namespace protobuf ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr binary_message_; } news_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - ::uint32_t _oneof_case_[1]; + uint32_t _oneof_case_[1]; }; union { @@ -5340,7 +5214,6 @@ namespace protobuf }; friend struct ::TableStruct_Message2Clients_2eproto; }; - // =================================================================== // =================================================================== @@ -5349,8 +5222,6 @@ namespace protobuf #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" #endif // __GNUC__ - // ------------------------------------------------------------------- - // MessageOfShip // int32 x = 1; @@ -5358,24 +5229,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t MessageOfShip::x() const + inline int32_t MessageOfShip::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfShip::set_x(::int32_t value) + inline int32_t MessageOfShip::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.x) + // 
@@protoc_insertion_point(field_get:protobuf.MessageOfShip.x) + return _internal_x(); } - inline ::int32_t MessageOfShip::_internal_x() const + inline void MessageOfShip::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfShip::_internal_set_x(::int32_t value) + inline void MessageOfShip::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.x) } // int32 y = 2; @@ -5383,24 +5253,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfShip::y() const + inline int32_t MessageOfShip::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfShip::set_y(::int32_t value) + inline int32_t MessageOfShip::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.y) + return _internal_y(); } - inline ::int32_t MessageOfShip::_internal_y() const + inline void MessageOfShip::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfShip::_internal_set_y(::int32_t value) + inline void MessageOfShip::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.y) } // int32 speed = 3; @@ -5408,24 +5277,23 @@ namespace protobuf { _impl_.speed_ = 0; } - inline ::int32_t MessageOfShip::speed() const + inline int32_t MessageOfShip::_internal_speed() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.speed) - return _internal_speed(); + return _impl_.speed_; } - inline void MessageOfShip::set_speed(::int32_t value) + inline int32_t MessageOfShip::speed() const { - _internal_set_speed(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.speed) + // 
@@protoc_insertion_point(field_get:protobuf.MessageOfShip.speed) + return _internal_speed(); } - inline ::int32_t MessageOfShip::_internal_speed() const + inline void MessageOfShip::_internal_set_speed(int32_t value) { - return _impl_.speed_; + _impl_.speed_ = value; } - inline void MessageOfShip::_internal_set_speed(::int32_t value) + inline void MessageOfShip::set_speed(int32_t value) { - ; - _impl_.speed_ = value; + _internal_set_speed(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.speed) } // int32 hp = 4; @@ -5433,49 +5301,47 @@ namespace protobuf { _impl_.hp_ = 0; } - inline ::int32_t MessageOfShip::hp() const + inline int32_t MessageOfShip::_internal_hp() const + { + return _impl_.hp_; + } + inline int32_t MessageOfShip::hp() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.hp) return _internal_hp(); } - inline void MessageOfShip::set_hp(::int32_t value) + inline void MessageOfShip::_internal_set_hp(int32_t value) + { + _impl_.hp_ = value; + } + inline void MessageOfShip::set_hp(int32_t value) { _internal_set_hp(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.hp) } - inline ::int32_t MessageOfShip::_internal_hp() const + + // int32 armor = 5; + inline void MessageOfShip::clear_armor() { - return _impl_.hp_; + _impl_.armor_ = 0; } - inline void MessageOfShip::_internal_set_hp(::int32_t value) + inline int32_t MessageOfShip::_internal_armor() const { - ; - _impl_.hp_ = value; - } - - // int32 armor = 5; - inline void MessageOfShip::clear_armor() - { - _impl_.armor_ = 0; + return _impl_.armor_; } - inline ::int32_t MessageOfShip::armor() const + inline int32_t MessageOfShip::armor() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.armor) return _internal_armor(); } - inline void MessageOfShip::set_armor(::int32_t value) - { - _internal_set_armor(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.armor) - } - inline ::int32_t MessageOfShip::_internal_armor() 
const + inline void MessageOfShip::_internal_set_armor(int32_t value) { - return _impl_.armor_; + _impl_.armor_ = value; } - inline void MessageOfShip::_internal_set_armor(::int32_t value) + inline void MessageOfShip::set_armor(int32_t value) { - ; - _impl_.armor_ = value; + _internal_set_armor(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.armor) } // int32 shield = 6; @@ -5483,99 +5349,95 @@ namespace protobuf { _impl_.shield_ = 0; } - inline ::int32_t MessageOfShip::shield() const + inline int32_t MessageOfShip::_internal_shield() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.shield) - return _internal_shield(); + return _impl_.shield_; } - inline void MessageOfShip::set_shield(::int32_t value) + inline int32_t MessageOfShip::shield() const { - _internal_set_shield(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.shield) + // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.shield) + return _internal_shield(); } - inline ::int32_t MessageOfShip::_internal_shield() const + inline void MessageOfShip::_internal_set_shield(int32_t value) { - return _impl_.shield_; + _impl_.shield_ = value; } - inline void MessageOfShip::_internal_set_shield(::int32_t value) + inline void MessageOfShip::set_shield(int32_t value) { - ; - _impl_.shield_ = value; + _internal_set_shield(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.shield) } // int64 team_id = 7; inline void MessageOfShip::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MessageOfShip::team_id() const + inline int64_t MessageOfShip::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void MessageOfShip::set_team_id(::int64_t value) + inline int64_t MessageOfShip::team_id() const { - _internal_set_team_id(value); - // 
@@protoc_insertion_point(field_set:protobuf.MessageOfShip.team_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.team_id) + return _internal_team_id(); } - inline ::int64_t MessageOfShip::_internal_team_id() const + inline void MessageOfShip::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MessageOfShip::_internal_set_team_id(::int64_t value) + inline void MessageOfShip::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.team_id) } // int64 player_id = 8; inline void MessageOfShip::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t MessageOfShip::player_id() const + inline int64_t MessageOfShip::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void MessageOfShip::set_player_id(::int64_t value) + inline int64_t MessageOfShip::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.player_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.player_id) + return _internal_player_id(); } - inline ::int64_t MessageOfShip::_internal_player_id() const + inline void MessageOfShip::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void MessageOfShip::_internal_set_player_id(::int64_t value) + inline void MessageOfShip::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.player_id) } // int64 guid = 9; inline void MessageOfShip::clear_guid() { - _impl_.guid_ = ::int64_t{0}; + _impl_.guid_ = int64_t{0}; } - inline ::int64_t MessageOfShip::guid() const + inline int64_t 
MessageOfShip::_internal_guid() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.guid) - return _internal_guid(); + return _impl_.guid_; } - inline void MessageOfShip::set_guid(::int64_t value) + inline int64_t MessageOfShip::guid() const { - _internal_set_guid(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.guid) + // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.guid) + return _internal_guid(); } - inline ::int64_t MessageOfShip::_internal_guid() const + inline void MessageOfShip::_internal_set_guid(int64_t value) { - return _impl_.guid_; + _impl_.guid_ = value; } - inline void MessageOfShip::_internal_set_guid(::int64_t value) + inline void MessageOfShip::set_guid(int64_t value) { - ; - _impl_.guid_ = value; + _internal_set_guid(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.guid) } // .protobuf.ShipState ship_state = 10; @@ -5583,74 +5445,71 @@ namespace protobuf { _impl_.ship_state_ = 0; } + inline ::protobuf::ShipState MessageOfShip::_internal_ship_state() const + { + return static_cast<::protobuf::ShipState>(_impl_.ship_state_); + } inline ::protobuf::ShipState MessageOfShip::ship_state() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.ship_state) return _internal_ship_state(); } + inline void MessageOfShip::_internal_set_ship_state(::protobuf::ShipState value) + { + _impl_.ship_state_ = value; + } inline void MessageOfShip::set_ship_state(::protobuf::ShipState value) { _internal_set_ship_state(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.ship_state) } - inline ::protobuf::ShipState MessageOfShip::_internal_ship_state() const - { - return static_cast<::protobuf::ShipState>(_impl_.ship_state_); - } - inline void MessageOfShip::_internal_set_ship_state(::protobuf::ShipState value) - { - ; - _impl_.ship_state_ = value; - } // .protobuf.ShipType ship_type = 11; inline void MessageOfShip::clear_ship_type() { _impl_.ship_type_ = 0; } + 
inline ::protobuf::ShipType MessageOfShip::_internal_ship_type() const + { + return static_cast<::protobuf::ShipType>(_impl_.ship_type_); + } inline ::protobuf::ShipType MessageOfShip::ship_type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.ship_type) return _internal_ship_type(); } + inline void MessageOfShip::_internal_set_ship_type(::protobuf::ShipType value) + { + _impl_.ship_type_ = value; + } inline void MessageOfShip::set_ship_type(::protobuf::ShipType value) { _internal_set_ship_type(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.ship_type) } - inline ::protobuf::ShipType MessageOfShip::_internal_ship_type() const - { - return static_cast<::protobuf::ShipType>(_impl_.ship_type_); - } - inline void MessageOfShip::_internal_set_ship_type(::protobuf::ShipType value) - { - ; - _impl_.ship_type_ = value; - } // int32 view_range = 12; inline void MessageOfShip::clear_view_range() { _impl_.view_range_ = 0; } - inline ::int32_t MessageOfShip::view_range() const + inline int32_t MessageOfShip::_internal_view_range() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.view_range) - return _internal_view_range(); + return _impl_.view_range_; } - inline void MessageOfShip::set_view_range(::int32_t value) + inline int32_t MessageOfShip::view_range() const { - _internal_set_view_range(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.view_range) + // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.view_range) + return _internal_view_range(); } - inline ::int32_t MessageOfShip::_internal_view_range() const + inline void MessageOfShip::_internal_set_view_range(int32_t value) { - return _impl_.view_range_; + _impl_.view_range_ = value; } - inline void MessageOfShip::_internal_set_view_range(::int32_t value) + inline void MessageOfShip::set_view_range(int32_t value) { - ; - _impl_.view_range_ = value; + _internal_set_view_range(value); + // 
@@protoc_insertion_point(field_set:protobuf.MessageOfShip.view_range) } // .protobuf.ProducerType producer_type = 13; @@ -5658,150 +5517,144 @@ namespace protobuf { _impl_.producer_type_ = 0; } + inline ::protobuf::ProducerType MessageOfShip::_internal_producer_type() const + { + return static_cast<::protobuf::ProducerType>(_impl_.producer_type_); + } inline ::protobuf::ProducerType MessageOfShip::producer_type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.producer_type) return _internal_producer_type(); } + inline void MessageOfShip::_internal_set_producer_type(::protobuf::ProducerType value) + { + _impl_.producer_type_ = value; + } inline void MessageOfShip::set_producer_type(::protobuf::ProducerType value) { _internal_set_producer_type(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.producer_type) } - inline ::protobuf::ProducerType MessageOfShip::_internal_producer_type() const - { - return static_cast<::protobuf::ProducerType>(_impl_.producer_type_); - } - inline void MessageOfShip::_internal_set_producer_type(::protobuf::ProducerType value) - { - ; - _impl_.producer_type_ = value; - } // .protobuf.ConstructorType constructor_type = 14; inline void MessageOfShip::clear_constructor_type() { _impl_.constructor_type_ = 0; } + inline ::protobuf::ConstructorType MessageOfShip::_internal_constructor_type() const + { + return static_cast<::protobuf::ConstructorType>(_impl_.constructor_type_); + } inline ::protobuf::ConstructorType MessageOfShip::constructor_type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.constructor_type) return _internal_constructor_type(); } + inline void MessageOfShip::_internal_set_constructor_type(::protobuf::ConstructorType value) + { + _impl_.constructor_type_ = value; + } inline void MessageOfShip::set_constructor_type(::protobuf::ConstructorType value) { _internal_set_constructor_type(value); // 
@@protoc_insertion_point(field_set:protobuf.MessageOfShip.constructor_type) } - inline ::protobuf::ConstructorType MessageOfShip::_internal_constructor_type() const - { - return static_cast<::protobuf::ConstructorType>(_impl_.constructor_type_); - } - inline void MessageOfShip::_internal_set_constructor_type(::protobuf::ConstructorType value) - { - ; - _impl_.constructor_type_ = value; - } // .protobuf.ArmorType armor_type = 15; inline void MessageOfShip::clear_armor_type() { _impl_.armor_type_ = 0; } + inline ::protobuf::ArmorType MessageOfShip::_internal_armor_type() const + { + return static_cast<::protobuf::ArmorType>(_impl_.armor_type_); + } inline ::protobuf::ArmorType MessageOfShip::armor_type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.armor_type) return _internal_armor_type(); } + inline void MessageOfShip::_internal_set_armor_type(::protobuf::ArmorType value) + { + _impl_.armor_type_ = value; + } inline void MessageOfShip::set_armor_type(::protobuf::ArmorType value) { _internal_set_armor_type(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.armor_type) } - inline ::protobuf::ArmorType MessageOfShip::_internal_armor_type() const - { - return static_cast<::protobuf::ArmorType>(_impl_.armor_type_); - } - inline void MessageOfShip::_internal_set_armor_type(::protobuf::ArmorType value) - { - ; - _impl_.armor_type_ = value; - } // .protobuf.ShieldType shield_type = 16; inline void MessageOfShip::clear_shield_type() { _impl_.shield_type_ = 0; } + inline ::protobuf::ShieldType MessageOfShip::_internal_shield_type() const + { + return static_cast<::protobuf::ShieldType>(_impl_.shield_type_); + } inline ::protobuf::ShieldType MessageOfShip::shield_type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.shield_type) return _internal_shield_type(); } + inline void MessageOfShip::_internal_set_shield_type(::protobuf::ShieldType value) + { + _impl_.shield_type_ = value; + } inline void 
MessageOfShip::set_shield_type(::protobuf::ShieldType value) { _internal_set_shield_type(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.shield_type) } - inline ::protobuf::ShieldType MessageOfShip::_internal_shield_type() const - { - return static_cast<::protobuf::ShieldType>(_impl_.shield_type_); - } - inline void MessageOfShip::_internal_set_shield_type(::protobuf::ShieldType value) - { - ; - _impl_.shield_type_ = value; - } // .protobuf.WeaponType weapon_type = 17; inline void MessageOfShip::clear_weapon_type() { _impl_.weapon_type_ = 0; } + inline ::protobuf::WeaponType MessageOfShip::_internal_weapon_type() const + { + return static_cast<::protobuf::WeaponType>(_impl_.weapon_type_); + } inline ::protobuf::WeaponType MessageOfShip::weapon_type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.weapon_type) return _internal_weapon_type(); } + inline void MessageOfShip::_internal_set_weapon_type(::protobuf::WeaponType value) + { + _impl_.weapon_type_ = value; + } inline void MessageOfShip::set_weapon_type(::protobuf::WeaponType value) { _internal_set_weapon_type(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.weapon_type) } - inline ::protobuf::WeaponType MessageOfShip::_internal_weapon_type() const - { - return static_cast<::protobuf::WeaponType>(_impl_.weapon_type_); - } - inline void MessageOfShip::_internal_set_weapon_type(::protobuf::WeaponType value) - { - ; - _impl_.weapon_type_ = value; - } // double facing_direction = 18; inline void MessageOfShip::clear_facing_direction() { _impl_.facing_direction_ = 0; } + inline double MessageOfShip::_internal_facing_direction() const + { + return _impl_.facing_direction_; + } inline double MessageOfShip::facing_direction() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfShip.facing_direction) return _internal_facing_direction(); } + inline void MessageOfShip::_internal_set_facing_direction(double value) + { + _impl_.facing_direction_ = 
value; + } inline void MessageOfShip::set_facing_direction(double value) { _internal_set_facing_direction(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfShip.facing_direction) } - inline double MessageOfShip::_internal_facing_direction() const - { - return _impl_.facing_direction_; - } - inline void MessageOfShip::_internal_set_facing_direction(double value) - { - ; - _impl_.facing_direction_ = value; - } // ------------------------------------------------------------------- @@ -5812,49 +5665,47 @@ namespace protobuf { _impl_.type_ = 0; } + inline ::protobuf::BulletType MessageOfBullet::_internal_type() const + { + return static_cast<::protobuf::BulletType>(_impl_.type_); + } inline ::protobuf::BulletType MessageOfBullet::type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.type) return _internal_type(); } + inline void MessageOfBullet::_internal_set_type(::protobuf::BulletType value) + { + _impl_.type_ = value; + } inline void MessageOfBullet::set_type(::protobuf::BulletType value) { _internal_set_type(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.type) } - inline ::protobuf::BulletType MessageOfBullet::_internal_type() const - { - return static_cast<::protobuf::BulletType>(_impl_.type_); - } - inline void MessageOfBullet::_internal_set_type(::protobuf::BulletType value) - { - ; - _impl_.type_ = value; - } // int32 x = 2; inline void MessageOfBullet::clear_x() { _impl_.x_ = 0; } - inline ::int32_t MessageOfBullet::x() const + inline int32_t MessageOfBullet::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfBullet::set_x(::int32_t value) + inline int32_t MessageOfBullet::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.x) + return _internal_x(); } - inline ::int32_t 
MessageOfBullet::_internal_x() const + inline void MessageOfBullet::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfBullet::_internal_set_x(::int32_t value) + inline void MessageOfBullet::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.x) } // int32 y = 3; @@ -5862,24 +5713,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfBullet::y() const + inline int32_t MessageOfBullet::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfBullet::set_y(::int32_t value) + inline int32_t MessageOfBullet::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.y) + return _internal_y(); } - inline ::int32_t MessageOfBullet::_internal_y() const + inline void MessageOfBullet::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfBullet::_internal_set_y(::int32_t value) + inline void MessageOfBullet::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.y) } // double facing_direction = 4; @@ -5887,99 +5737,95 @@ namespace protobuf { _impl_.facing_direction_ = 0; } + inline double MessageOfBullet::_internal_facing_direction() const + { + return _impl_.facing_direction_; + } inline double MessageOfBullet::facing_direction() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.facing_direction) return _internal_facing_direction(); } + inline void MessageOfBullet::_internal_set_facing_direction(double value) + { + _impl_.facing_direction_ = value; + } inline void MessageOfBullet::set_facing_direction(double value) { _internal_set_facing_direction(value); // 
@@protoc_insertion_point(field_set:protobuf.MessageOfBullet.facing_direction) } - inline double MessageOfBullet::_internal_facing_direction() const - { - return _impl_.facing_direction_; - } - inline void MessageOfBullet::_internal_set_facing_direction(double value) - { - ; - _impl_.facing_direction_ = value; - } // int32 damage = 5; inline void MessageOfBullet::clear_damage() { _impl_.damage_ = 0; } - inline ::int32_t MessageOfBullet::damage() const + inline int32_t MessageOfBullet::_internal_damage() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.damage) - return _internal_damage(); + return _impl_.damage_; } - inline void MessageOfBullet::set_damage(::int32_t value) + inline int32_t MessageOfBullet::damage() const { - _internal_set_damage(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.damage) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.damage) + return _internal_damage(); } - inline ::int32_t MessageOfBullet::_internal_damage() const + inline void MessageOfBullet::_internal_set_damage(int32_t value) { - return _impl_.damage_; + _impl_.damage_ = value; } - inline void MessageOfBullet::_internal_set_damage(::int32_t value) + inline void MessageOfBullet::set_damage(int32_t value) { - ; - _impl_.damage_ = value; + _internal_set_damage(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.damage) } // int64 team_id = 6; inline void MessageOfBullet::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MessageOfBullet::team_id() const + inline int64_t MessageOfBullet::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void MessageOfBullet::set_team_id(::int64_t value) + inline int64_t MessageOfBullet::team_id() const { - _internal_set_team_id(value); - // 
@@protoc_insertion_point(field_set:protobuf.MessageOfBullet.team_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.team_id) + return _internal_team_id(); } - inline ::int64_t MessageOfBullet::_internal_team_id() const + inline void MessageOfBullet::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MessageOfBullet::_internal_set_team_id(::int64_t value) + inline void MessageOfBullet::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.team_id) } // int64 guid = 7; inline void MessageOfBullet::clear_guid() { - _impl_.guid_ = ::int64_t{0}; + _impl_.guid_ = int64_t{0}; } - inline ::int64_t MessageOfBullet::guid() const + inline int64_t MessageOfBullet::_internal_guid() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.guid) - return _internal_guid(); + return _impl_.guid_; } - inline void MessageOfBullet::set_guid(::int64_t value) + inline int64_t MessageOfBullet::guid() const { - _internal_set_guid(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.guid) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.guid) + return _internal_guid(); } - inline ::int64_t MessageOfBullet::_internal_guid() const + inline void MessageOfBullet::_internal_set_guid(int64_t value) { - return _impl_.guid_; + _impl_.guid_ = value; } - inline void MessageOfBullet::_internal_set_guid(::int64_t value) + inline void MessageOfBullet::set_guid(int64_t value) { - ; - _impl_.guid_ = value; + _internal_set_guid(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.guid) } // double bomb_range = 8; @@ -5987,49 +5833,47 @@ namespace protobuf { _impl_.bomb_range_ = 0; } + inline double MessageOfBullet::_internal_bomb_range() const + { + return _impl_.bomb_range_; + } inline double MessageOfBullet::bomb_range() const { // 
@@protoc_insertion_point(field_get:protobuf.MessageOfBullet.bomb_range) return _internal_bomb_range(); } + inline void MessageOfBullet::_internal_set_bomb_range(double value) + { + _impl_.bomb_range_ = value; + } inline void MessageOfBullet::set_bomb_range(double value) { _internal_set_bomb_range(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.bomb_range) } - inline double MessageOfBullet::_internal_bomb_range() const - { - return _impl_.bomb_range_; - } - inline void MessageOfBullet::_internal_set_bomb_range(double value) - { - ; - _impl_.bomb_range_ = value; - } // int32 speed = 9; inline void MessageOfBullet::clear_speed() { _impl_.speed_ = 0; } - inline ::int32_t MessageOfBullet::speed() const + inline int32_t MessageOfBullet::_internal_speed() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.speed) - return _internal_speed(); + return _impl_.speed_; } - inline void MessageOfBullet::set_speed(::int32_t value) + inline int32_t MessageOfBullet::speed() const { - _internal_set_speed(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.speed) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBullet.speed) + return _internal_speed(); } - inline ::int32_t MessageOfBullet::_internal_speed() const + inline void MessageOfBullet::_internal_set_speed(int32_t value) { - return _impl_.speed_; + _impl_.speed_ = value; } - inline void MessageOfBullet::_internal_set_speed(::int32_t value) + inline void MessageOfBullet::set_speed(int32_t value) { - ; - _impl_.speed_ = value; + _internal_set_speed(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBullet.speed) } // ------------------------------------------------------------------- @@ -6041,49 +5885,47 @@ namespace protobuf { _impl_.type_ = 0; } + inline ::protobuf::BulletType MessageOfBombedBullet::_internal_type() const + { + return static_cast<::protobuf::BulletType>(_impl_.type_); + } inline ::protobuf::BulletType 
MessageOfBombedBullet::type() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.type) return _internal_type(); } + inline void MessageOfBombedBullet::_internal_set_type(::protobuf::BulletType value) + { + _impl_.type_ = value; + } inline void MessageOfBombedBullet::set_type(::protobuf::BulletType value) { _internal_set_type(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.type) } - inline ::protobuf::BulletType MessageOfBombedBullet::_internal_type() const - { - return static_cast<::protobuf::BulletType>(_impl_.type_); - } - inline void MessageOfBombedBullet::_internal_set_type(::protobuf::BulletType value) - { - ; - _impl_.type_ = value; - } // int32 x = 2; inline void MessageOfBombedBullet::clear_x() { _impl_.x_ = 0; } - inline ::int32_t MessageOfBombedBullet::x() const + inline int32_t MessageOfBombedBullet::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfBombedBullet::set_x(::int32_t value) + inline int32_t MessageOfBombedBullet::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.x) + return _internal_x(); } - inline ::int32_t MessageOfBombedBullet::_internal_x() const + inline void MessageOfBombedBullet::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfBombedBullet::_internal_set_x(::int32_t value) + inline void MessageOfBombedBullet::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.x) } // int32 y = 3; @@ -6091,24 +5933,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfBombedBullet::y() const + inline int32_t MessageOfBombedBullet::_internal_y() const { - // 
@@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfBombedBullet::set_y(::int32_t value) + inline int32_t MessageOfBombedBullet::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.y) + return _internal_y(); } - inline ::int32_t MessageOfBombedBullet::_internal_y() const + inline void MessageOfBombedBullet::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfBombedBullet::_internal_set_y(::int32_t value) + inline void MessageOfBombedBullet::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.y) } // double facing_direction = 4; @@ -6116,49 +5957,47 @@ namespace protobuf { _impl_.facing_direction_ = 0; } + inline double MessageOfBombedBullet::_internal_facing_direction() const + { + return _impl_.facing_direction_; + } inline double MessageOfBombedBullet::facing_direction() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.facing_direction) return _internal_facing_direction(); } + inline void MessageOfBombedBullet::_internal_set_facing_direction(double value) + { + _impl_.facing_direction_ = value; + } inline void MessageOfBombedBullet::set_facing_direction(double value) { _internal_set_facing_direction(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.facing_direction) } - inline double MessageOfBombedBullet::_internal_facing_direction() const - { - return _impl_.facing_direction_; - } - inline void MessageOfBombedBullet::_internal_set_facing_direction(double value) - { - ; - _impl_.facing_direction_ = value; - } // int64 mapping_id = 5; inline void MessageOfBombedBullet::clear_mapping_id() { - _impl_.mapping_id_ = ::int64_t{0}; + _impl_.mapping_id_ = 
int64_t{0}; } - inline ::int64_t MessageOfBombedBullet::mapping_id() const + inline int64_t MessageOfBombedBullet::_internal_mapping_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.mapping_id) - return _internal_mapping_id(); + return _impl_.mapping_id_; } - inline void MessageOfBombedBullet::set_mapping_id(::int64_t value) + inline int64_t MessageOfBombedBullet::mapping_id() const { - _internal_set_mapping_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.mapping_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.mapping_id) + return _internal_mapping_id(); } - inline ::int64_t MessageOfBombedBullet::_internal_mapping_id() const + inline void MessageOfBombedBullet::_internal_set_mapping_id(int64_t value) { - return _impl_.mapping_id_; + _impl_.mapping_id_ = value; } - inline void MessageOfBombedBullet::_internal_set_mapping_id(::int64_t value) + inline void MessageOfBombedBullet::set_mapping_id(int64_t value) { - ; - _impl_.mapping_id_ = value; + _internal_set_mapping_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.mapping_id) } // double bomb_range = 6; @@ -6166,25 +6005,24 @@ namespace protobuf { _impl_.bomb_range_ = 0; } + inline double MessageOfBombedBullet::_internal_bomb_range() const + { + return _impl_.bomb_range_; + } inline double MessageOfBombedBullet::bomb_range() const { // @@protoc_insertion_point(field_get:protobuf.MessageOfBombedBullet.bomb_range) return _internal_bomb_range(); } + inline void MessageOfBombedBullet::_internal_set_bomb_range(double value) + { + _impl_.bomb_range_ = value; + } inline void MessageOfBombedBullet::set_bomb_range(double value) { _internal_set_bomb_range(value); // @@protoc_insertion_point(field_set:protobuf.MessageOfBombedBullet.bomb_range) } - inline double MessageOfBombedBullet::_internal_bomb_range() const - { - return _impl_.bomb_range_; - } - inline void 
MessageOfBombedBullet::_internal_set_bomb_range(double value) - { - ; - _impl_.bomb_range_ = value; - } // ------------------------------------------------------------------- @@ -6195,24 +6033,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t MessageOfFactory::x() const + inline int32_t MessageOfFactory::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfFactory::set_x(::int32_t value) + inline int32_t MessageOfFactory::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.x) + return _internal_x(); } - inline ::int32_t MessageOfFactory::_internal_x() const + inline void MessageOfFactory::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfFactory::_internal_set_x(::int32_t value) + inline void MessageOfFactory::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.x) } // int32 y = 2; @@ -6220,24 +6057,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfFactory::y() const + inline int32_t MessageOfFactory::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfFactory::set_y(::int32_t value) + inline int32_t MessageOfFactory::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.y) + return _internal_y(); } - inline ::int32_t MessageOfFactory::_internal_y() const + inline void MessageOfFactory::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfFactory::_internal_set_y(::int32_t value) + inline void 
MessageOfFactory::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.y) } // int32 hp = 3; @@ -6245,49 +6081,47 @@ namespace protobuf { _impl_.hp_ = 0; } - inline ::int32_t MessageOfFactory::hp() const + inline int32_t MessageOfFactory::_internal_hp() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.hp) - return _internal_hp(); + return _impl_.hp_; } - inline void MessageOfFactory::set_hp(::int32_t value) + inline int32_t MessageOfFactory::hp() const { - _internal_set_hp(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.hp) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.hp) + return _internal_hp(); } - inline ::int32_t MessageOfFactory::_internal_hp() const + inline void MessageOfFactory::_internal_set_hp(int32_t value) { - return _impl_.hp_; + _impl_.hp_ = value; } - inline void MessageOfFactory::_internal_set_hp(::int32_t value) + inline void MessageOfFactory::set_hp(int32_t value) { - ; - _impl_.hp_ = value; + _internal_set_hp(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.hp) } // int64 team_id = 4; inline void MessageOfFactory::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MessageOfFactory::team_id() const + inline int64_t MessageOfFactory::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void MessageOfFactory::set_team_id(::int64_t value) + inline int64_t MessageOfFactory::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.team_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFactory.team_id) + return _internal_team_id(); } - inline ::int64_t MessageOfFactory::_internal_team_id() const + inline void 
MessageOfFactory::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MessageOfFactory::_internal_set_team_id(::int64_t value) + inline void MessageOfFactory::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFactory.team_id) } // ------------------------------------------------------------------- @@ -6299,24 +6133,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t MessageOfCommunity::x() const + inline int32_t MessageOfCommunity::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfCommunity::set_x(::int32_t value) + inline int32_t MessageOfCommunity::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.x) + return _internal_x(); } - inline ::int32_t MessageOfCommunity::_internal_x() const + inline void MessageOfCommunity::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfCommunity::_internal_set_x(::int32_t value) + inline void MessageOfCommunity::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.x) } // int32 y = 2; @@ -6324,24 +6157,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfCommunity::y() const + inline int32_t MessageOfCommunity::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfCommunity::set_y(::int32_t value) + inline int32_t MessageOfCommunity::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.y) + // 
@@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.y) + return _internal_y(); } - inline ::int32_t MessageOfCommunity::_internal_y() const + inline void MessageOfCommunity::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfCommunity::_internal_set_y(::int32_t value) + inline void MessageOfCommunity::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.y) } // int32 hp = 3; @@ -6349,49 +6181,47 @@ namespace protobuf { _impl_.hp_ = 0; } - inline ::int32_t MessageOfCommunity::hp() const + inline int32_t MessageOfCommunity::_internal_hp() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.hp) - return _internal_hp(); + return _impl_.hp_; } - inline void MessageOfCommunity::set_hp(::int32_t value) + inline int32_t MessageOfCommunity::hp() const { - _internal_set_hp(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.hp) + // @@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.hp) + return _internal_hp(); } - inline ::int32_t MessageOfCommunity::_internal_hp() const + inline void MessageOfCommunity::_internal_set_hp(int32_t value) { - return _impl_.hp_; + _impl_.hp_ = value; } - inline void MessageOfCommunity::_internal_set_hp(::int32_t value) + inline void MessageOfCommunity::set_hp(int32_t value) { - ; - _impl_.hp_ = value; + _internal_set_hp(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.hp) } // int64 team_id = 4; inline void MessageOfCommunity::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MessageOfCommunity::team_id() const + inline int64_t MessageOfCommunity::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void 
MessageOfCommunity::set_team_id(::int64_t value) + inline int64_t MessageOfCommunity::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.team_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfCommunity.team_id) + return _internal_team_id(); } - inline ::int64_t MessageOfCommunity::_internal_team_id() const + inline void MessageOfCommunity::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MessageOfCommunity::_internal_set_team_id(::int64_t value) + inline void MessageOfCommunity::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfCommunity.team_id) } // ------------------------------------------------------------------- @@ -6403,24 +6233,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t MessageOfFort::x() const + inline int32_t MessageOfFort::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfFort.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfFort::set_x(::int32_t value) + inline int32_t MessageOfFort::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFort.x) + return _internal_x(); } - inline ::int32_t MessageOfFort::_internal_x() const + inline void MessageOfFort::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfFort::_internal_set_x(::int32_t value) + inline void MessageOfFort::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.x) } // int32 y = 2; @@ -6428,24 +6257,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfFort::y() const + inline int32_t MessageOfFort::_internal_y() const { - // 
@@protoc_insertion_point(field_get:protobuf.MessageOfFort.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfFort::set_y(::int32_t value) + inline int32_t MessageOfFort::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFort.y) + return _internal_y(); } - inline ::int32_t MessageOfFort::_internal_y() const + inline void MessageOfFort::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfFort::_internal_set_y(::int32_t value) + inline void MessageOfFort::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.y) } // int32 hp = 3; @@ -6453,49 +6281,47 @@ namespace protobuf { _impl_.hp_ = 0; } - inline ::int32_t MessageOfFort::hp() const + inline int32_t MessageOfFort::_internal_hp() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfFort.hp) - return _internal_hp(); + return _impl_.hp_; } - inline void MessageOfFort::set_hp(::int32_t value) + inline int32_t MessageOfFort::hp() const { - _internal_set_hp(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.hp) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFort.hp) + return _internal_hp(); } - inline ::int32_t MessageOfFort::_internal_hp() const + inline void MessageOfFort::_internal_set_hp(int32_t value) { - return _impl_.hp_; + _impl_.hp_ = value; } - inline void MessageOfFort::_internal_set_hp(::int32_t value) + inline void MessageOfFort::set_hp(int32_t value) { - ; - _impl_.hp_ = value; + _internal_set_hp(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.hp) } // int64 team_id = 4; inline void MessageOfFort::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MessageOfFort::team_id() const + inline int64_t MessageOfFort::_internal_team_id() 
const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfFort.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void MessageOfFort::set_team_id(::int64_t value) + inline int64_t MessageOfFort::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.team_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfFort.team_id) + return _internal_team_id(); } - inline ::int64_t MessageOfFort::_internal_team_id() const + inline void MessageOfFort::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MessageOfFort::_internal_set_team_id(::int64_t value) + inline void MessageOfFort::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfFort.team_id) } // ------------------------------------------------------------------- @@ -6507,24 +6333,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t MessageOfWormhole::x() const + inline int32_t MessageOfWormhole::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfWormhole.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfWormhole::set_x(::int32_t value) + inline int32_t MessageOfWormhole::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfWormhole.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfWormhole.x) + return _internal_x(); } - inline ::int32_t MessageOfWormhole::_internal_x() const + inline void MessageOfWormhole::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfWormhole::_internal_set_x(::int32_t value) + inline void MessageOfWormhole::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfWormhole.x) } // int32 y = 2; @@ -6532,24 +6357,23 
@@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfWormhole::y() const + inline int32_t MessageOfWormhole::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfWormhole.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfWormhole::set_y(::int32_t value) + inline int32_t MessageOfWormhole::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfWormhole.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfWormhole.y) + return _internal_y(); } - inline ::int32_t MessageOfWormhole::_internal_y() const + inline void MessageOfWormhole::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfWormhole::_internal_set_y(::int32_t value) + inline void MessageOfWormhole::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfWormhole.y) } // int32 hp = 3; @@ -6557,24 +6381,23 @@ namespace protobuf { _impl_.hp_ = 0; } - inline ::int32_t MessageOfWormhole::hp() const + inline int32_t MessageOfWormhole::_internal_hp() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfWormhole.hp) - return _internal_hp(); + return _impl_.hp_; } - inline void MessageOfWormhole::set_hp(::int32_t value) + inline int32_t MessageOfWormhole::hp() const { - _internal_set_hp(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfWormhole.hp) + // @@protoc_insertion_point(field_get:protobuf.MessageOfWormhole.hp) + return _internal_hp(); } - inline ::int32_t MessageOfWormhole::_internal_hp() const + inline void MessageOfWormhole::_internal_set_hp(int32_t value) { - return _impl_.hp_; + _impl_.hp_ = value; } - inline void MessageOfWormhole::_internal_set_hp(::int32_t value) + inline void MessageOfWormhole::set_hp(int32_t value) { - ; - _impl_.hp_ = value; + _internal_set_hp(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfWormhole.hp) 
} // ------------------------------------------------------------------- @@ -6586,24 +6409,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t MessageOfResource::x() const + inline int32_t MessageOfResource::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfResource.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfResource::set_x(::int32_t value) + inline int32_t MessageOfResource::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfResource.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfResource.x) + return _internal_x(); } - inline ::int32_t MessageOfResource::_internal_x() const + inline void MessageOfResource::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfResource::_internal_set_x(::int32_t value) + inline void MessageOfResource::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfResource.x) } // int32 y = 2; @@ -6611,24 +6433,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfResource::y() const + inline int32_t MessageOfResource::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfResource.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfResource::set_y(::int32_t value) + inline int32_t MessageOfResource::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfResource.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfResource.y) + return _internal_y(); } - inline ::int32_t MessageOfResource::_internal_y() const + inline void MessageOfResource::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfResource::_internal_set_y(::int32_t value) + inline void MessageOfResource::set_y(int32_t value) { - ; - _impl_.y_ = value; + 
_internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfResource.y) } // int32 progress = 3; @@ -6636,24 +6457,23 @@ namespace protobuf { _impl_.progress_ = 0; } - inline ::int32_t MessageOfResource::progress() const + inline int32_t MessageOfResource::_internal_progress() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfResource.progress) - return _internal_progress(); + return _impl_.progress_; } - inline void MessageOfResource::set_progress(::int32_t value) + inline int32_t MessageOfResource::progress() const { - _internal_set_progress(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfResource.progress) + // @@protoc_insertion_point(field_get:protobuf.MessageOfResource.progress) + return _internal_progress(); } - inline ::int32_t MessageOfResource::_internal_progress() const + inline void MessageOfResource::_internal_set_progress(int32_t value) { - return _impl_.progress_; + _impl_.progress_ = value; } - inline void MessageOfResource::_internal_set_progress(::int32_t value) + inline void MessageOfResource::set_progress(int32_t value) { - ; - _impl_.progress_ = value; + _internal_set_progress(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfResource.progress) } // ------------------------------------------------------------------- @@ -6665,24 +6485,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t MessageOfHome::x() const + inline int32_t MessageOfHome::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.x) - return _internal_x(); + return _impl_.x_; } - inline void MessageOfHome::set_x(::int32_t value) + inline int32_t MessageOfHome::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.x) + // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.x) + return _internal_x(); } - inline ::int32_t MessageOfHome::_internal_x() const + inline void MessageOfHome::_internal_set_x(int32_t value) { - 
return _impl_.x_; + _impl_.x_ = value; } - inline void MessageOfHome::_internal_set_x(::int32_t value) + inline void MessageOfHome::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.x) } // int32 y = 2; @@ -6690,24 +6509,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t MessageOfHome::y() const + inline int32_t MessageOfHome::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.y) - return _internal_y(); + return _impl_.y_; } - inline void MessageOfHome::set_y(::int32_t value) + inline int32_t MessageOfHome::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.y) + // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.y) + return _internal_y(); } - inline ::int32_t MessageOfHome::_internal_y() const + inline void MessageOfHome::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void MessageOfHome::_internal_set_y(::int32_t value) + inline void MessageOfHome::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.y) } // int32 hp = 3; @@ -6715,49 +6533,47 @@ namespace protobuf { _impl_.hp_ = 0; } - inline ::int32_t MessageOfHome::hp() const + inline int32_t MessageOfHome::_internal_hp() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.hp) - return _internal_hp(); + return _impl_.hp_; } - inline void MessageOfHome::set_hp(::int32_t value) + inline int32_t MessageOfHome::hp() const { - _internal_set_hp(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.hp) + // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.hp) + return _internal_hp(); } - inline ::int32_t MessageOfHome::_internal_hp() const + inline void MessageOfHome::_internal_set_hp(int32_t value) { - return _impl_.hp_; + _impl_.hp_ = value; } - inline void 
MessageOfHome::_internal_set_hp(::int32_t value) + inline void MessageOfHome::set_hp(int32_t value) { - ; - _impl_.hp_ = value; + _internal_set_hp(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.hp) } // int64 team_id = 4; inline void MessageOfHome::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MessageOfHome::team_id() const + inline int64_t MessageOfHome::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void MessageOfHome::set_team_id(::int64_t value) + inline int64_t MessageOfHome::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.team_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfHome.team_id) + return _internal_team_id(); } - inline ::int64_t MessageOfHome::_internal_team_id() const + inline void MessageOfHome::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MessageOfHome::_internal_set_team_id(::int64_t value) + inline void MessageOfHome::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfHome.team_id) } // ------------------------------------------------------------------- @@ -6775,7 +6591,11 @@ namespace protobuf } inline void MessageOfMap_Row::clear_cols() { - _internal_mutable_cols()->Clear(); + _impl_.cols_.Clear(); + } + inline ::protobuf::PlaceType MessageOfMap_Row::_internal_cols(int index) const + { + return static_cast<::protobuf::PlaceType>(_impl_.cols_.Get(index)); } inline ::protobuf::PlaceType MessageOfMap_Row::cols(int index) const { @@ -6784,40 +6604,35 @@ namespace protobuf } inline void MessageOfMap_Row::set_cols(int index, ::protobuf::PlaceType value) { - _internal_mutable_cols()->Set(index, value); + 
_impl_.cols_.Set(index, value); // @@protoc_insertion_point(field_set:protobuf.MessageOfMap.Row.cols) } + inline void MessageOfMap_Row::_internal_add_cols(::protobuf::PlaceType value) + { + _impl_.cols_.Add(value); + } inline void MessageOfMap_Row::add_cols(::protobuf::PlaceType value) { _internal_add_cols(value); // @@protoc_insertion_point(field_add:protobuf.MessageOfMap.Row.cols) } - inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField& MessageOfMap_Row::cols() const + inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField& + MessageOfMap_Row::cols() const { // @@protoc_insertion_point(field_list:protobuf.MessageOfMap.Row.cols) - return _internal_cols(); - } - inline ::PROTOBUF_NAMESPACE_ID::RepeatedField* MessageOfMap_Row::mutable_cols() - { - // @@protoc_insertion_point(field_mutable_list:protobuf.MessageOfMap.Row.cols) - return _internal_mutable_cols(); - } - inline ::protobuf::PlaceType MessageOfMap_Row::_internal_cols(int index) const - { - return static_cast<::protobuf::PlaceType>(_internal_cols().Get(index)); - } - inline void MessageOfMap_Row::_internal_add_cols(::protobuf::PlaceType value) - { - _internal_mutable_cols()->Add(value); - } - inline const ::PROTOBUF_NAMESPACE_ID::RepeatedField& MessageOfMap_Row::_internal_cols() const - { return _impl_.cols_; } - inline ::PROTOBUF_NAMESPACE_ID::RepeatedField* MessageOfMap_Row::_internal_mutable_cols() + inline ::PROTOBUF_NAMESPACE_ID::RepeatedField* + MessageOfMap_Row::_internal_mutable_cols() { return &_impl_.cols_; } + inline ::PROTOBUF_NAMESPACE_ID::RepeatedField* + MessageOfMap_Row::mutable_cols() + { + // @@protoc_insertion_point(field_mutable_list:protobuf.MessageOfMap.Row.cols) + return _internal_mutable_cols(); + } // ------------------------------------------------------------------- @@ -6828,24 +6643,23 @@ namespace protobuf { _impl_.height_ = 0u; } - inline ::uint32_t MessageOfMap::height() const + inline uint32_t MessageOfMap::_internal_height() const { - // 
@@protoc_insertion_point(field_get:protobuf.MessageOfMap.height) - return _internal_height(); + return _impl_.height_; } - inline void MessageOfMap::set_height(::uint32_t value) + inline uint32_t MessageOfMap::height() const { - _internal_set_height(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfMap.height) + // @@protoc_insertion_point(field_get:protobuf.MessageOfMap.height) + return _internal_height(); } - inline ::uint32_t MessageOfMap::_internal_height() const + inline void MessageOfMap::_internal_set_height(uint32_t value) { - return _impl_.height_; + _impl_.height_ = value; } - inline void MessageOfMap::_internal_set_height(::uint32_t value) + inline void MessageOfMap::set_height(uint32_t value) { - ; - _impl_.height_ = value; + _internal_set_height(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfMap.height) } // uint32 width = 2; @@ -6853,24 +6667,23 @@ namespace protobuf { _impl_.width_ = 0u; } - inline ::uint32_t MessageOfMap::width() const + inline uint32_t MessageOfMap::_internal_width() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfMap.width) - return _internal_width(); + return _impl_.width_; } - inline void MessageOfMap::set_width(::uint32_t value) + inline uint32_t MessageOfMap::width() const { - _internal_set_width(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfMap.width) + // @@protoc_insertion_point(field_get:protobuf.MessageOfMap.width) + return _internal_width(); } - inline ::uint32_t MessageOfMap::_internal_width() const + inline void MessageOfMap::_internal_set_width(uint32_t value) { - return _impl_.width_; + _impl_.width_ = value; } - inline void MessageOfMap::_internal_set_width(::uint32_t value) + inline void MessageOfMap::set_width(uint32_t value) { - ; - _impl_.width_ = value; + _internal_set_width(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfMap.width) } // repeated .protobuf.MessageOfMap.Row rows = 3; @@ -6884,22 +6697,22 @@ namespace 
protobuf } inline void MessageOfMap::clear_rows() { - _internal_mutable_rows()->Clear(); + _impl_.rows_.Clear(); } inline ::protobuf::MessageOfMap_Row* MessageOfMap::mutable_rows(int index) { // @@protoc_insertion_point(field_mutable:protobuf.MessageOfMap.rows) - return _internal_mutable_rows()->Mutable(index); + return _impl_.rows_.Mutable(index); } inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfMap_Row>* MessageOfMap::mutable_rows() { // @@protoc_insertion_point(field_mutable_list:protobuf.MessageOfMap.rows) - return _internal_mutable_rows(); + return &_impl_.rows_; } inline const ::protobuf::MessageOfMap_Row& MessageOfMap::_internal_rows(int index) const { - return _internal_rows().Get(index); + return _impl_.rows_.Get(index); } inline const ::protobuf::MessageOfMap_Row& MessageOfMap::rows(int index) const { @@ -6908,7 +6721,7 @@ namespace protobuf } inline ::protobuf::MessageOfMap_Row* MessageOfMap::_internal_add_rows() { - return _internal_mutable_rows()->Add(); + return _impl_.rows_.Add(); } inline ::protobuf::MessageOfMap_Row* MessageOfMap::add_rows() { @@ -6920,18 +6733,8 @@ namespace protobuf MessageOfMap::rows() const { // @@protoc_insertion_point(field_list:protobuf.MessageOfMap.rows) - return _internal_rows(); - } - inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfMap_Row>& - MessageOfMap::_internal_rows() const - { return _impl_.rows_; } - inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfMap_Row>* - MessageOfMap::_internal_mutable_rows() - { - return &_impl_.rows_; - } // ------------------------------------------------------------------- @@ -6940,51 +6743,49 @@ namespace protobuf // int64 team_id = 1; inline void MessageOfTeam::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MessageOfTeam::team_id() const + inline int64_t MessageOfTeam::_internal_team_id() const { - // 
@@protoc_insertion_point(field_get:protobuf.MessageOfTeam.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void MessageOfTeam::set_team_id(::int64_t value) + inline int64_t MessageOfTeam::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.team_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.team_id) + return _internal_team_id(); } - inline ::int64_t MessageOfTeam::_internal_team_id() const + inline void MessageOfTeam::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MessageOfTeam::_internal_set_team_id(::int64_t value) + inline void MessageOfTeam::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.team_id) } // int64 player_id = 2; inline void MessageOfTeam::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t MessageOfTeam::player_id() const + inline int64_t MessageOfTeam::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void MessageOfTeam::set_player_id(::int64_t value) + inline int64_t MessageOfTeam::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.player_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.player_id) + return _internal_player_id(); } - inline ::int64_t MessageOfTeam::_internal_player_id() const + inline void MessageOfTeam::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void MessageOfTeam::_internal_set_player_id(::int64_t value) + inline void MessageOfTeam::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + 
// @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.player_id) } // int32 score = 3; @@ -6992,24 +6793,23 @@ namespace protobuf { _impl_.score_ = 0; } - inline ::int32_t MessageOfTeam::score() const + inline int32_t MessageOfTeam::_internal_score() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.score) - return _internal_score(); + return _impl_.score_; } - inline void MessageOfTeam::set_score(::int32_t value) + inline int32_t MessageOfTeam::score() const { - _internal_set_score(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.score) + // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.score) + return _internal_score(); } - inline ::int32_t MessageOfTeam::_internal_score() const + inline void MessageOfTeam::_internal_set_score(int32_t value) { - return _impl_.score_; + _impl_.score_ = value; } - inline void MessageOfTeam::_internal_set_score(::int32_t value) + inline void MessageOfTeam::set_score(int32_t value) { - ; - _impl_.score_ = value; + _internal_set_score(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.score) } // int32 money = 4; @@ -7017,49 +6817,47 @@ namespace protobuf { _impl_.money_ = 0; } - inline ::int32_t MessageOfTeam::money() const + inline int32_t MessageOfTeam::_internal_money() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.money) - return _internal_money(); + return _impl_.money_; } - inline void MessageOfTeam::set_money(::int32_t value) + inline int32_t MessageOfTeam::money() const { - _internal_set_money(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.money) + // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.money) + return _internal_money(); } - inline ::int32_t MessageOfTeam::_internal_money() const + inline void MessageOfTeam::_internal_set_money(int32_t value) { - return _impl_.money_; + _impl_.money_ = value; } - inline void MessageOfTeam::_internal_set_money(::int32_t value) + inline void 
MessageOfTeam::set_money(int32_t value) { - ; - _impl_.money_ = value; + _internal_set_money(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.money) } // int64 guid = 5; inline void MessageOfTeam::clear_guid() { - _impl_.guid_ = ::int64_t{0}; + _impl_.guid_ = int64_t{0}; } - inline ::int64_t MessageOfTeam::guid() const + inline int64_t MessageOfTeam::_internal_guid() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.guid) - return _internal_guid(); + return _impl_.guid_; } - inline void MessageOfTeam::set_guid(::int64_t value) + inline int64_t MessageOfTeam::guid() const { - _internal_set_guid(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.guid) + // @@protoc_insertion_point(field_get:protobuf.MessageOfTeam.guid) + return _internal_guid(); } - inline ::int64_t MessageOfTeam::_internal_guid() const + inline void MessageOfTeam::_internal_set_guid(int64_t value) { - return _impl_.guid_; + _impl_.guid_ = value; } - inline void MessageOfTeam::_internal_set_guid(::int64_t value) + inline void MessageOfTeam::set_guid(int64_t value) { - ; - _impl_.guid_ = value; + _internal_set_guid(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfTeam.guid) } // ------------------------------------------------------------------- @@ -7067,13 +6865,13 @@ namespace protobuf // MessageOfObj // .protobuf.MessageOfShip ship_message = 1; - inline bool MessageOfObj::has_ship_message() const + inline bool MessageOfObj::_internal_has_ship_message() const { return message_of_obj_case() == kShipMessage; } - inline bool MessageOfObj::_internal_has_ship_message() const + inline bool MessageOfObj::has_ship_message() const { - return message_of_obj_case() == kShipMessage; + return _internal_has_ship_message(); } inline void MessageOfObj::set_has_ship_message() { @@ -7081,7 +6879,7 @@ namespace protobuf } inline void MessageOfObj::clear_ship_message() { - if (message_of_obj_case() == kShipMessage) + if 
(_internal_has_ship_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7093,7 +6891,7 @@ namespace protobuf inline ::protobuf::MessageOfShip* MessageOfObj::release_ship_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.ship_message) - if (message_of_obj_case() == kShipMessage) + if (_internal_has_ship_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfShip* temp = _impl_.message_of_obj_.ship_message_; @@ -7111,7 +6909,7 @@ namespace protobuf } inline const ::protobuf::MessageOfShip& MessageOfObj::_internal_ship_message() const { - return message_of_obj_case() == kShipMessage ? *_impl_.message_of_obj_.ship_message_ : reinterpret_cast<::protobuf::MessageOfShip&>(::protobuf::_MessageOfShip_default_instance_); + return _internal_has_ship_message() ? *_impl_.message_of_obj_.ship_message_ : reinterpret_cast<::protobuf::MessageOfShip&>(::protobuf::_MessageOfShip_default_instance_); } inline const ::protobuf::MessageOfShip& MessageOfObj::ship_message() const { @@ -7121,7 +6919,7 @@ namespace protobuf inline ::protobuf::MessageOfShip* MessageOfObj::unsafe_arena_release_ship_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.ship_message) - if (message_of_obj_case() == kShipMessage) + if (_internal_has_ship_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfShip* temp = _impl_.message_of_obj_.ship_message_; @@ -7145,7 +6943,7 @@ namespace protobuf } inline ::protobuf::MessageOfShip* MessageOfObj::_internal_mutable_ship_message() { - if (message_of_obj_case() != kShipMessage) + if (!_internal_has_ship_message()) { clear_message_of_obj(); set_has_ship_message(); @@ -7161,13 +6959,13 @@ namespace protobuf } // .protobuf.MessageOfBullet bullet_message = 2; - inline bool MessageOfObj::has_bullet_message() const + inline bool MessageOfObj::_internal_has_bullet_message() const { return message_of_obj_case() == kBulletMessage; } - inline bool MessageOfObj::_internal_has_bullet_message() 
const + inline bool MessageOfObj::has_bullet_message() const { - return message_of_obj_case() == kBulletMessage; + return _internal_has_bullet_message(); } inline void MessageOfObj::set_has_bullet_message() { @@ -7175,7 +6973,7 @@ namespace protobuf } inline void MessageOfObj::clear_bullet_message() { - if (message_of_obj_case() == kBulletMessage) + if (_internal_has_bullet_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7187,7 +6985,7 @@ namespace protobuf inline ::protobuf::MessageOfBullet* MessageOfObj::release_bullet_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.bullet_message) - if (message_of_obj_case() == kBulletMessage) + if (_internal_has_bullet_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfBullet* temp = _impl_.message_of_obj_.bullet_message_; @@ -7205,7 +7003,7 @@ namespace protobuf } inline const ::protobuf::MessageOfBullet& MessageOfObj::_internal_bullet_message() const { - return message_of_obj_case() == kBulletMessage ? *_impl_.message_of_obj_.bullet_message_ : reinterpret_cast<::protobuf::MessageOfBullet&>(::protobuf::_MessageOfBullet_default_instance_); + return _internal_has_bullet_message() ? 
*_impl_.message_of_obj_.bullet_message_ : reinterpret_cast<::protobuf::MessageOfBullet&>(::protobuf::_MessageOfBullet_default_instance_); } inline const ::protobuf::MessageOfBullet& MessageOfObj::bullet_message() const { @@ -7215,7 +7013,7 @@ namespace protobuf inline ::protobuf::MessageOfBullet* MessageOfObj::unsafe_arena_release_bullet_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.bullet_message) - if (message_of_obj_case() == kBulletMessage) + if (_internal_has_bullet_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfBullet* temp = _impl_.message_of_obj_.bullet_message_; @@ -7239,7 +7037,7 @@ namespace protobuf } inline ::protobuf::MessageOfBullet* MessageOfObj::_internal_mutable_bullet_message() { - if (message_of_obj_case() != kBulletMessage) + if (!_internal_has_bullet_message()) { clear_message_of_obj(); set_has_bullet_message(); @@ -7255,13 +7053,13 @@ namespace protobuf } // .protobuf.MessageOfFactory factory_message = 3; - inline bool MessageOfObj::has_factory_message() const + inline bool MessageOfObj::_internal_has_factory_message() const { return message_of_obj_case() == kFactoryMessage; } - inline bool MessageOfObj::_internal_has_factory_message() const + inline bool MessageOfObj::has_factory_message() const { - return message_of_obj_case() == kFactoryMessage; + return _internal_has_factory_message(); } inline void MessageOfObj::set_has_factory_message() { @@ -7269,7 +7067,7 @@ namespace protobuf } inline void MessageOfObj::clear_factory_message() { - if (message_of_obj_case() == kFactoryMessage) + if (_internal_has_factory_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7281,7 +7079,7 @@ namespace protobuf inline ::protobuf::MessageOfFactory* MessageOfObj::release_factory_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.factory_message) - if (message_of_obj_case() == kFactoryMessage) + if (_internal_has_factory_message()) { clear_has_message_of_obj(); 
::protobuf::MessageOfFactory* temp = _impl_.message_of_obj_.factory_message_; @@ -7299,7 +7097,7 @@ namespace protobuf } inline const ::protobuf::MessageOfFactory& MessageOfObj::_internal_factory_message() const { - return message_of_obj_case() == kFactoryMessage ? *_impl_.message_of_obj_.factory_message_ : reinterpret_cast<::protobuf::MessageOfFactory&>(::protobuf::_MessageOfFactory_default_instance_); + return _internal_has_factory_message() ? *_impl_.message_of_obj_.factory_message_ : reinterpret_cast<::protobuf::MessageOfFactory&>(::protobuf::_MessageOfFactory_default_instance_); } inline const ::protobuf::MessageOfFactory& MessageOfObj::factory_message() const { @@ -7309,7 +7107,7 @@ namespace protobuf inline ::protobuf::MessageOfFactory* MessageOfObj::unsafe_arena_release_factory_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.factory_message) - if (message_of_obj_case() == kFactoryMessage) + if (_internal_has_factory_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfFactory* temp = _impl_.message_of_obj_.factory_message_; @@ -7333,7 +7131,7 @@ namespace protobuf } inline ::protobuf::MessageOfFactory* MessageOfObj::_internal_mutable_factory_message() { - if (message_of_obj_case() != kFactoryMessage) + if (!_internal_has_factory_message()) { clear_message_of_obj(); set_has_factory_message(); @@ -7349,13 +7147,13 @@ namespace protobuf } // .protobuf.MessageOfCommunity community_message = 4; - inline bool MessageOfObj::has_community_message() const + inline bool MessageOfObj::_internal_has_community_message() const { return message_of_obj_case() == kCommunityMessage; } - inline bool MessageOfObj::_internal_has_community_message() const + inline bool MessageOfObj::has_community_message() const { - return message_of_obj_case() == kCommunityMessage; + return _internal_has_community_message(); } inline void MessageOfObj::set_has_community_message() { @@ -7363,7 +7161,7 @@ namespace protobuf } inline void 
MessageOfObj::clear_community_message() { - if (message_of_obj_case() == kCommunityMessage) + if (_internal_has_community_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7375,7 +7173,7 @@ namespace protobuf inline ::protobuf::MessageOfCommunity* MessageOfObj::release_community_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.community_message) - if (message_of_obj_case() == kCommunityMessage) + if (_internal_has_community_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfCommunity* temp = _impl_.message_of_obj_.community_message_; @@ -7393,7 +7191,7 @@ namespace protobuf } inline const ::protobuf::MessageOfCommunity& MessageOfObj::_internal_community_message() const { - return message_of_obj_case() == kCommunityMessage ? *_impl_.message_of_obj_.community_message_ : reinterpret_cast<::protobuf::MessageOfCommunity&>(::protobuf::_MessageOfCommunity_default_instance_); + return _internal_has_community_message() ? *_impl_.message_of_obj_.community_message_ : reinterpret_cast<::protobuf::MessageOfCommunity&>(::protobuf::_MessageOfCommunity_default_instance_); } inline const ::protobuf::MessageOfCommunity& MessageOfObj::community_message() const { @@ -7403,7 +7201,7 @@ namespace protobuf inline ::protobuf::MessageOfCommunity* MessageOfObj::unsafe_arena_release_community_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.community_message) - if (message_of_obj_case() == kCommunityMessage) + if (_internal_has_community_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfCommunity* temp = _impl_.message_of_obj_.community_message_; @@ -7427,7 +7225,7 @@ namespace protobuf } inline ::protobuf::MessageOfCommunity* MessageOfObj::_internal_mutable_community_message() { - if (message_of_obj_case() != kCommunityMessage) + if (!_internal_has_community_message()) { clear_message_of_obj(); set_has_community_message(); @@ -7443,13 +7241,13 @@ namespace protobuf } // .protobuf.MessageOfFort 
fort_message = 5; - inline bool MessageOfObj::has_fort_message() const + inline bool MessageOfObj::_internal_has_fort_message() const { return message_of_obj_case() == kFortMessage; } - inline bool MessageOfObj::_internal_has_fort_message() const + inline bool MessageOfObj::has_fort_message() const { - return message_of_obj_case() == kFortMessage; + return _internal_has_fort_message(); } inline void MessageOfObj::set_has_fort_message() { @@ -7457,7 +7255,7 @@ namespace protobuf } inline void MessageOfObj::clear_fort_message() { - if (message_of_obj_case() == kFortMessage) + if (_internal_has_fort_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7469,7 +7267,7 @@ namespace protobuf inline ::protobuf::MessageOfFort* MessageOfObj::release_fort_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.fort_message) - if (message_of_obj_case() == kFortMessage) + if (_internal_has_fort_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfFort* temp = _impl_.message_of_obj_.fort_message_; @@ -7487,7 +7285,7 @@ namespace protobuf } inline const ::protobuf::MessageOfFort& MessageOfObj::_internal_fort_message() const { - return message_of_obj_case() == kFortMessage ? *_impl_.message_of_obj_.fort_message_ : reinterpret_cast<::protobuf::MessageOfFort&>(::protobuf::_MessageOfFort_default_instance_); + return _internal_has_fort_message() ? 
*_impl_.message_of_obj_.fort_message_ : reinterpret_cast<::protobuf::MessageOfFort&>(::protobuf::_MessageOfFort_default_instance_); } inline const ::protobuf::MessageOfFort& MessageOfObj::fort_message() const { @@ -7497,7 +7295,7 @@ namespace protobuf inline ::protobuf::MessageOfFort* MessageOfObj::unsafe_arena_release_fort_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.fort_message) - if (message_of_obj_case() == kFortMessage) + if (_internal_has_fort_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfFort* temp = _impl_.message_of_obj_.fort_message_; @@ -7521,7 +7319,7 @@ namespace protobuf } inline ::protobuf::MessageOfFort* MessageOfObj::_internal_mutable_fort_message() { - if (message_of_obj_case() != kFortMessage) + if (!_internal_has_fort_message()) { clear_message_of_obj(); set_has_fort_message(); @@ -7537,13 +7335,13 @@ namespace protobuf } // .protobuf.MessageOfWormhole wormhole_message = 6; - inline bool MessageOfObj::has_wormhole_message() const + inline bool MessageOfObj::_internal_has_wormhole_message() const { return message_of_obj_case() == kWormholeMessage; } - inline bool MessageOfObj::_internal_has_wormhole_message() const + inline bool MessageOfObj::has_wormhole_message() const { - return message_of_obj_case() == kWormholeMessage; + return _internal_has_wormhole_message(); } inline void MessageOfObj::set_has_wormhole_message() { @@ -7551,7 +7349,7 @@ namespace protobuf } inline void MessageOfObj::clear_wormhole_message() { - if (message_of_obj_case() == kWormholeMessage) + if (_internal_has_wormhole_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7563,7 +7361,7 @@ namespace protobuf inline ::protobuf::MessageOfWormhole* MessageOfObj::release_wormhole_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.wormhole_message) - if (message_of_obj_case() == kWormholeMessage) + if (_internal_has_wormhole_message()) { clear_has_message_of_obj(); 
::protobuf::MessageOfWormhole* temp = _impl_.message_of_obj_.wormhole_message_; @@ -7581,7 +7379,7 @@ namespace protobuf } inline const ::protobuf::MessageOfWormhole& MessageOfObj::_internal_wormhole_message() const { - return message_of_obj_case() == kWormholeMessage ? *_impl_.message_of_obj_.wormhole_message_ : reinterpret_cast<::protobuf::MessageOfWormhole&>(::protobuf::_MessageOfWormhole_default_instance_); + return _internal_has_wormhole_message() ? *_impl_.message_of_obj_.wormhole_message_ : reinterpret_cast<::protobuf::MessageOfWormhole&>(::protobuf::_MessageOfWormhole_default_instance_); } inline const ::protobuf::MessageOfWormhole& MessageOfObj::wormhole_message() const { @@ -7591,7 +7389,7 @@ namespace protobuf inline ::protobuf::MessageOfWormhole* MessageOfObj::unsafe_arena_release_wormhole_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.wormhole_message) - if (message_of_obj_case() == kWormholeMessage) + if (_internal_has_wormhole_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfWormhole* temp = _impl_.message_of_obj_.wormhole_message_; @@ -7615,7 +7413,7 @@ namespace protobuf } inline ::protobuf::MessageOfWormhole* MessageOfObj::_internal_mutable_wormhole_message() { - if (message_of_obj_case() != kWormholeMessage) + if (!_internal_has_wormhole_message()) { clear_message_of_obj(); set_has_wormhole_message(); @@ -7631,13 +7429,13 @@ namespace protobuf } // .protobuf.MessageOfHome home_message = 7; - inline bool MessageOfObj::has_home_message() const + inline bool MessageOfObj::_internal_has_home_message() const { return message_of_obj_case() == kHomeMessage; } - inline bool MessageOfObj::_internal_has_home_message() const + inline bool MessageOfObj::has_home_message() const { - return message_of_obj_case() == kHomeMessage; + return _internal_has_home_message(); } inline void MessageOfObj::set_has_home_message() { @@ -7645,7 +7443,7 @@ namespace protobuf } inline void 
MessageOfObj::clear_home_message() { - if (message_of_obj_case() == kHomeMessage) + if (_internal_has_home_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7657,7 +7455,7 @@ namespace protobuf inline ::protobuf::MessageOfHome* MessageOfObj::release_home_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.home_message) - if (message_of_obj_case() == kHomeMessage) + if (_internal_has_home_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfHome* temp = _impl_.message_of_obj_.home_message_; @@ -7675,7 +7473,7 @@ namespace protobuf } inline const ::protobuf::MessageOfHome& MessageOfObj::_internal_home_message() const { - return message_of_obj_case() == kHomeMessage ? *_impl_.message_of_obj_.home_message_ : reinterpret_cast<::protobuf::MessageOfHome&>(::protobuf::_MessageOfHome_default_instance_); + return _internal_has_home_message() ? *_impl_.message_of_obj_.home_message_ : reinterpret_cast<::protobuf::MessageOfHome&>(::protobuf::_MessageOfHome_default_instance_); } inline const ::protobuf::MessageOfHome& MessageOfObj::home_message() const { @@ -7685,7 +7483,7 @@ namespace protobuf inline ::protobuf::MessageOfHome* MessageOfObj::unsafe_arena_release_home_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.home_message) - if (message_of_obj_case() == kHomeMessage) + if (_internal_has_home_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfHome* temp = _impl_.message_of_obj_.home_message_; @@ -7709,7 +7507,7 @@ namespace protobuf } inline ::protobuf::MessageOfHome* MessageOfObj::_internal_mutable_home_message() { - if (message_of_obj_case() != kHomeMessage) + if (!_internal_has_home_message()) { clear_message_of_obj(); set_has_home_message(); @@ -7725,13 +7523,13 @@ namespace protobuf } // .protobuf.MessageOfResource resource_message = 8; - inline bool MessageOfObj::has_resource_message() const + inline bool MessageOfObj::_internal_has_resource_message() const { return 
message_of_obj_case() == kResourceMessage; } - inline bool MessageOfObj::_internal_has_resource_message() const + inline bool MessageOfObj::has_resource_message() const { - return message_of_obj_case() == kResourceMessage; + return _internal_has_resource_message(); } inline void MessageOfObj::set_has_resource_message() { @@ -7739,7 +7537,7 @@ namespace protobuf } inline void MessageOfObj::clear_resource_message() { - if (message_of_obj_case() == kResourceMessage) + if (_internal_has_resource_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7751,7 +7549,7 @@ namespace protobuf inline ::protobuf::MessageOfResource* MessageOfObj::release_resource_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.resource_message) - if (message_of_obj_case() == kResourceMessage) + if (_internal_has_resource_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfResource* temp = _impl_.message_of_obj_.resource_message_; @@ -7769,7 +7567,7 @@ namespace protobuf } inline const ::protobuf::MessageOfResource& MessageOfObj::_internal_resource_message() const { - return message_of_obj_case() == kResourceMessage ? *_impl_.message_of_obj_.resource_message_ : reinterpret_cast<::protobuf::MessageOfResource&>(::protobuf::_MessageOfResource_default_instance_); + return _internal_has_resource_message() ? 
*_impl_.message_of_obj_.resource_message_ : reinterpret_cast<::protobuf::MessageOfResource&>(::protobuf::_MessageOfResource_default_instance_); } inline const ::protobuf::MessageOfResource& MessageOfObj::resource_message() const { @@ -7779,7 +7577,7 @@ namespace protobuf inline ::protobuf::MessageOfResource* MessageOfObj::unsafe_arena_release_resource_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.resource_message) - if (message_of_obj_case() == kResourceMessage) + if (_internal_has_resource_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfResource* temp = _impl_.message_of_obj_.resource_message_; @@ -7803,7 +7601,7 @@ namespace protobuf } inline ::protobuf::MessageOfResource* MessageOfObj::_internal_mutable_resource_message() { - if (message_of_obj_case() != kResourceMessage) + if (!_internal_has_resource_message()) { clear_message_of_obj(); set_has_resource_message(); @@ -7819,13 +7617,13 @@ namespace protobuf } // .protobuf.MessageOfMap map_message = 9; - inline bool MessageOfObj::has_map_message() const + inline bool MessageOfObj::_internal_has_map_message() const { return message_of_obj_case() == kMapMessage; } - inline bool MessageOfObj::_internal_has_map_message() const + inline bool MessageOfObj::has_map_message() const { - return message_of_obj_case() == kMapMessage; + return _internal_has_map_message(); } inline void MessageOfObj::set_has_map_message() { @@ -7833,7 +7631,7 @@ namespace protobuf } inline void MessageOfObj::clear_map_message() { - if (message_of_obj_case() == kMapMessage) + if (_internal_has_map_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7845,7 +7643,7 @@ namespace protobuf inline ::protobuf::MessageOfMap* MessageOfObj::release_map_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.map_message) - if (message_of_obj_case() == kMapMessage) + if (_internal_has_map_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfMap* temp = 
_impl_.message_of_obj_.map_message_; @@ -7863,7 +7661,7 @@ namespace protobuf } inline const ::protobuf::MessageOfMap& MessageOfObj::_internal_map_message() const { - return message_of_obj_case() == kMapMessage ? *_impl_.message_of_obj_.map_message_ : reinterpret_cast<::protobuf::MessageOfMap&>(::protobuf::_MessageOfMap_default_instance_); + return _internal_has_map_message() ? *_impl_.message_of_obj_.map_message_ : reinterpret_cast<::protobuf::MessageOfMap&>(::protobuf::_MessageOfMap_default_instance_); } inline const ::protobuf::MessageOfMap& MessageOfObj::map_message() const { @@ -7873,7 +7671,7 @@ namespace protobuf inline ::protobuf::MessageOfMap* MessageOfObj::unsafe_arena_release_map_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.map_message) - if (message_of_obj_case() == kMapMessage) + if (_internal_has_map_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfMap* temp = _impl_.message_of_obj_.map_message_; @@ -7897,7 +7695,7 @@ namespace protobuf } inline ::protobuf::MessageOfMap* MessageOfObj::_internal_mutable_map_message() { - if (message_of_obj_case() != kMapMessage) + if (!_internal_has_map_message()) { clear_message_of_obj(); set_has_map_message(); @@ -7913,13 +7711,13 @@ namespace protobuf } // .protobuf.MessageOfNews news_message = 10; - inline bool MessageOfObj::has_news_message() const + inline bool MessageOfObj::_internal_has_news_message() const { return message_of_obj_case() == kNewsMessage; } - inline bool MessageOfObj::_internal_has_news_message() const + inline bool MessageOfObj::has_news_message() const { - return message_of_obj_case() == kNewsMessage; + return _internal_has_news_message(); } inline void MessageOfObj::set_has_news_message() { @@ -7927,7 +7725,7 @@ namespace protobuf } inline void MessageOfObj::clear_news_message() { - if (message_of_obj_case() == kNewsMessage) + if (_internal_has_news_message()) { if (GetArenaForAllocation() == nullptr) { @@ -7939,7 +7737,7 @@ namespace 
protobuf inline ::protobuf::MessageOfNews* MessageOfObj::release_news_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.news_message) - if (message_of_obj_case() == kNewsMessage) + if (_internal_has_news_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfNews* temp = _impl_.message_of_obj_.news_message_; @@ -7957,7 +7755,7 @@ namespace protobuf } inline const ::protobuf::MessageOfNews& MessageOfObj::_internal_news_message() const { - return message_of_obj_case() == kNewsMessage ? *_impl_.message_of_obj_.news_message_ : reinterpret_cast<::protobuf::MessageOfNews&>(::protobuf::_MessageOfNews_default_instance_); + return _internal_has_news_message() ? *_impl_.message_of_obj_.news_message_ : reinterpret_cast<::protobuf::MessageOfNews&>(::protobuf::_MessageOfNews_default_instance_); } inline const ::protobuf::MessageOfNews& MessageOfObj::news_message() const { @@ -7967,7 +7765,7 @@ namespace protobuf inline ::protobuf::MessageOfNews* MessageOfObj::unsafe_arena_release_news_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.news_message) - if (message_of_obj_case() == kNewsMessage) + if (_internal_has_news_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfNews* temp = _impl_.message_of_obj_.news_message_; @@ -7991,7 +7789,7 @@ namespace protobuf } inline ::protobuf::MessageOfNews* MessageOfObj::_internal_mutable_news_message() { - if (message_of_obj_case() != kNewsMessage) + if (!_internal_has_news_message()) { clear_message_of_obj(); set_has_news_message(); @@ -8007,13 +7805,13 @@ namespace protobuf } // .protobuf.MessageOfBombedBullet bombed_bullet_message = 11; - inline bool MessageOfObj::has_bombed_bullet_message() const + inline bool MessageOfObj::_internal_has_bombed_bullet_message() const { return message_of_obj_case() == kBombedBulletMessage; } - inline bool MessageOfObj::_internal_has_bombed_bullet_message() const + inline bool MessageOfObj::has_bombed_bullet_message() const { 
- return message_of_obj_case() == kBombedBulletMessage; + return _internal_has_bombed_bullet_message(); } inline void MessageOfObj::set_has_bombed_bullet_message() { @@ -8021,7 +7819,7 @@ namespace protobuf } inline void MessageOfObj::clear_bombed_bullet_message() { - if (message_of_obj_case() == kBombedBulletMessage) + if (_internal_has_bombed_bullet_message()) { if (GetArenaForAllocation() == nullptr) { @@ -8033,7 +7831,7 @@ namespace protobuf inline ::protobuf::MessageOfBombedBullet* MessageOfObj::release_bombed_bullet_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.bombed_bullet_message) - if (message_of_obj_case() == kBombedBulletMessage) + if (_internal_has_bombed_bullet_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfBombedBullet* temp = _impl_.message_of_obj_.bombed_bullet_message_; @@ -8051,7 +7849,7 @@ namespace protobuf } inline const ::protobuf::MessageOfBombedBullet& MessageOfObj::_internal_bombed_bullet_message() const { - return message_of_obj_case() == kBombedBulletMessage ? *_impl_.message_of_obj_.bombed_bullet_message_ : reinterpret_cast<::protobuf::MessageOfBombedBullet&>(::protobuf::_MessageOfBombedBullet_default_instance_); + return _internal_has_bombed_bullet_message() ? 
*_impl_.message_of_obj_.bombed_bullet_message_ : reinterpret_cast<::protobuf::MessageOfBombedBullet&>(::protobuf::_MessageOfBombedBullet_default_instance_); } inline const ::protobuf::MessageOfBombedBullet& MessageOfObj::bombed_bullet_message() const { @@ -8061,7 +7859,7 @@ namespace protobuf inline ::protobuf::MessageOfBombedBullet* MessageOfObj::unsafe_arena_release_bombed_bullet_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.bombed_bullet_message) - if (message_of_obj_case() == kBombedBulletMessage) + if (_internal_has_bombed_bullet_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfBombedBullet* temp = _impl_.message_of_obj_.bombed_bullet_message_; @@ -8085,7 +7883,7 @@ namespace protobuf } inline ::protobuf::MessageOfBombedBullet* MessageOfObj::_internal_mutable_bombed_bullet_message() { - if (message_of_obj_case() != kBombedBulletMessage) + if (!_internal_has_bombed_bullet_message()) { clear_message_of_obj(); set_has_bombed_bullet_message(); @@ -8101,13 +7899,13 @@ namespace protobuf } // .protobuf.MessageOfTeam team_message = 12; - inline bool MessageOfObj::has_team_message() const + inline bool MessageOfObj::_internal_has_team_message() const { return message_of_obj_case() == kTeamMessage; } - inline bool MessageOfObj::_internal_has_team_message() const + inline bool MessageOfObj::has_team_message() const { - return message_of_obj_case() == kTeamMessage; + return _internal_has_team_message(); } inline void MessageOfObj::set_has_team_message() { @@ -8115,7 +7913,7 @@ namespace protobuf } inline void MessageOfObj::clear_team_message() { - if (message_of_obj_case() == kTeamMessage) + if (_internal_has_team_message()) { if (GetArenaForAllocation() == nullptr) { @@ -8127,7 +7925,7 @@ namespace protobuf inline ::protobuf::MessageOfTeam* MessageOfObj::release_team_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfObj.team_message) - if (message_of_obj_case() == kTeamMessage) + if 
(_internal_has_team_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfTeam* temp = _impl_.message_of_obj_.team_message_; @@ -8145,7 +7943,7 @@ namespace protobuf } inline const ::protobuf::MessageOfTeam& MessageOfObj::_internal_team_message() const { - return message_of_obj_case() == kTeamMessage ? *_impl_.message_of_obj_.team_message_ : reinterpret_cast<::protobuf::MessageOfTeam&>(::protobuf::_MessageOfTeam_default_instance_); + return _internal_has_team_message() ? *_impl_.message_of_obj_.team_message_ : reinterpret_cast<::protobuf::MessageOfTeam&>(::protobuf::_MessageOfTeam_default_instance_); } inline const ::protobuf::MessageOfTeam& MessageOfObj::team_message() const { @@ -8155,7 +7953,7 @@ namespace protobuf inline ::protobuf::MessageOfTeam* MessageOfObj::unsafe_arena_release_team_message() { // @@protoc_insertion_point(field_unsafe_arena_release:protobuf.MessageOfObj.team_message) - if (message_of_obj_case() == kTeamMessage) + if (_internal_has_team_message()) { clear_has_message_of_obj(); ::protobuf::MessageOfTeam* temp = _impl_.message_of_obj_.team_message_; @@ -8177,112 +7975,205 @@ namespace protobuf } // @@protoc_insertion_point(field_unsafe_arena_set_allocated:protobuf.MessageOfObj.team_message) } - inline ::protobuf::MessageOfTeam* MessageOfObj::_internal_mutable_team_message() + inline ::protobuf::MessageOfTeam* MessageOfObj::_internal_mutable_team_message() + { + if (!_internal_has_team_message()) + { + clear_message_of_obj(); + set_has_team_message(); + _impl_.message_of_obj_.team_message_ = CreateMaybeMessage<::protobuf::MessageOfTeam>(GetArenaForAllocation()); + } + return _impl_.message_of_obj_.team_message_; + } + inline ::protobuf::MessageOfTeam* MessageOfObj::mutable_team_message() + { + ::protobuf::MessageOfTeam* _msg = _internal_mutable_team_message(); + // @@protoc_insertion_point(field_mutable:protobuf.MessageOfObj.team_message) + return _msg; + } + + inline bool MessageOfObj::has_message_of_obj() const + { + return 
message_of_obj_case() != MESSAGE_OF_OBJ_NOT_SET; + } + inline void MessageOfObj::clear_has_message_of_obj() + { + _impl_._oneof_case_[0] = MESSAGE_OF_OBJ_NOT_SET; + } + inline MessageOfObj::MessageOfObjCase MessageOfObj::message_of_obj_case() const + { + return MessageOfObj::MessageOfObjCase(_impl_._oneof_case_[0]); + } + // ------------------------------------------------------------------- + + // MessageOfAll + + // int32 game_time = 1; + inline void MessageOfAll::clear_game_time() + { + _impl_.game_time_ = 0; + } + inline int32_t MessageOfAll::_internal_game_time() const + { + return _impl_.game_time_; + } + inline int32_t MessageOfAll::game_time() const + { + // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.game_time) + return _internal_game_time(); + } + inline void MessageOfAll::_internal_set_game_time(int32_t value) + { + _impl_.game_time_ = value; + } + inline void MessageOfAll::set_game_time(int32_t value) + { + _internal_set_game_time(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.game_time) + } + + // int32 red_team_score = 2; + inline void MessageOfAll::clear_red_team_score() + { + _impl_.red_team_score_ = 0; + } + inline int32_t MessageOfAll::_internal_red_team_score() const + { + return _impl_.red_team_score_; + } + inline int32_t MessageOfAll::red_team_score() const + { + // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.red_team_score) + return _internal_red_team_score(); + } + inline void MessageOfAll::_internal_set_red_team_score(int32_t value) + { + _impl_.red_team_score_ = value; + } + inline void MessageOfAll::set_red_team_score(int32_t value) + { + _internal_set_red_team_score(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.red_team_score) + } + + // int32 blue_team_score = 3; + inline void MessageOfAll::clear_blue_team_score() + { + _impl_.blue_team_score_ = 0; + } + inline int32_t MessageOfAll::_internal_blue_team_score() const + { + return _impl_.blue_team_score_; + } + 
inline int32_t MessageOfAll::blue_team_score() const { - if (message_of_obj_case() != kTeamMessage) - { - clear_message_of_obj(); - set_has_team_message(); - _impl_.message_of_obj_.team_message_ = CreateMaybeMessage<::protobuf::MessageOfTeam>(GetArenaForAllocation()); - } - return _impl_.message_of_obj_.team_message_; + // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.blue_team_score) + return _internal_blue_team_score(); } - inline ::protobuf::MessageOfTeam* MessageOfObj::mutable_team_message() + inline void MessageOfAll::_internal_set_blue_team_score(int32_t value) { - ::protobuf::MessageOfTeam* _msg = _internal_mutable_team_message(); - // @@protoc_insertion_point(field_mutable:protobuf.MessageOfObj.team_message) - return _msg; + _impl_.blue_team_score_ = value; + } + inline void MessageOfAll::set_blue_team_score(int32_t value) + { + _internal_set_blue_team_score(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.blue_team_score) } - inline bool MessageOfObj::has_message_of_obj() const + // int32 red_team_money = 4; + inline void MessageOfAll::clear_red_team_money() { - return message_of_obj_case() != MESSAGE_OF_OBJ_NOT_SET; + _impl_.red_team_money_ = 0; } - inline void MessageOfObj::clear_has_message_of_obj() + inline int32_t MessageOfAll::_internal_red_team_money() const { - _impl_._oneof_case_[0] = MESSAGE_OF_OBJ_NOT_SET; + return _impl_.red_team_money_; } - inline MessageOfObj::MessageOfObjCase MessageOfObj::message_of_obj_case() const + inline int32_t MessageOfAll::red_team_money() const { - return MessageOfObj::MessageOfObjCase(_impl_._oneof_case_[0]); + // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.red_team_money) + return _internal_red_team_money(); + } + inline void MessageOfAll::_internal_set_red_team_money(int32_t value) + { + _impl_.red_team_money_ = value; + } + inline void MessageOfAll::set_red_team_money(int32_t value) + { + _internal_set_red_team_money(value); + // 
@@protoc_insertion_point(field_set:protobuf.MessageOfAll.red_team_money) } - // ------------------------------------------------------------------- - - // MessageOfAll - // int32 game_time = 1; - inline void MessageOfAll::clear_game_time() + // int32 blue_team_money = 5; + inline void MessageOfAll::clear_blue_team_money() { - _impl_.game_time_ = 0; + _impl_.blue_team_money_ = 0; } - inline ::int32_t MessageOfAll::game_time() const + inline int32_t MessageOfAll::_internal_blue_team_money() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.game_time) - return _internal_game_time(); + return _impl_.blue_team_money_; } - inline void MessageOfAll::set_game_time(::int32_t value) + inline int32_t MessageOfAll::blue_team_money() const { - _internal_set_game_time(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.game_time) + // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.blue_team_money) + return _internal_blue_team_money(); } - inline ::int32_t MessageOfAll::_internal_game_time() const + inline void MessageOfAll::_internal_set_blue_team_money(int32_t value) { - return _impl_.game_time_; + _impl_.blue_team_money_ = value; } - inline void MessageOfAll::_internal_set_game_time(::int32_t value) + inline void MessageOfAll::set_blue_team_money(int32_t value) { - ; - _impl_.game_time_ = value; + _internal_set_blue_team_money(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.blue_team_money) } - // int32 red_team_score = 2; - inline void MessageOfAll::clear_red_team_score() + // int32 red_home_hp = 6; + inline void MessageOfAll::clear_red_home_hp() { - _impl_.red_team_score_ = 0; + _impl_.red_home_hp_ = 0; } - inline ::int32_t MessageOfAll::red_team_score() const + inline int32_t MessageOfAll::_internal_red_home_hp() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.red_team_score) - return _internal_red_team_score(); + return _impl_.red_home_hp_; } - inline void 
MessageOfAll::set_red_team_score(::int32_t value) + inline int32_t MessageOfAll::red_home_hp() const { - _internal_set_red_team_score(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.red_team_score) + // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.red_home_hp) + return _internal_red_home_hp(); } - inline ::int32_t MessageOfAll::_internal_red_team_score() const + inline void MessageOfAll::_internal_set_red_home_hp(int32_t value) { - return _impl_.red_team_score_; + _impl_.red_home_hp_ = value; } - inline void MessageOfAll::_internal_set_red_team_score(::int32_t value) + inline void MessageOfAll::set_red_home_hp(int32_t value) { - ; - _impl_.red_team_score_ = value; + _internal_set_red_home_hp(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.red_home_hp) } - // int32 blue_team_score = 3; - inline void MessageOfAll::clear_blue_team_score() + // int32 blue_home_hp = 7; + inline void MessageOfAll::clear_blue_home_hp() { - _impl_.blue_team_score_ = 0; + _impl_.blue_home_hp_ = 0; } - inline ::int32_t MessageOfAll::blue_team_score() const + inline int32_t MessageOfAll::_internal_blue_home_hp() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.blue_team_score) - return _internal_blue_team_score(); + return _impl_.blue_home_hp_; } - inline void MessageOfAll::set_blue_team_score(::int32_t value) + inline int32_t MessageOfAll::blue_home_hp() const { - _internal_set_blue_team_score(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.blue_team_score) + // @@protoc_insertion_point(field_get:protobuf.MessageOfAll.blue_home_hp) + return _internal_blue_home_hp(); } - inline ::int32_t MessageOfAll::_internal_blue_team_score() const + inline void MessageOfAll::_internal_set_blue_home_hp(int32_t value) { - return _impl_.blue_team_score_; + _impl_.blue_home_hp_ = value; } - inline void MessageOfAll::_internal_set_blue_team_score(::int32_t value) + inline void 
MessageOfAll::set_blue_home_hp(int32_t value) { - ; - _impl_.blue_team_score_ = value; + _internal_set_blue_home_hp(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfAll.blue_home_hp) } // ------------------------------------------------------------------- @@ -8300,22 +8191,22 @@ namespace protobuf } inline void MessageToClient::clear_obj_message() { - _internal_mutable_obj_message()->Clear(); + _impl_.obj_message_.Clear(); } inline ::protobuf::MessageOfObj* MessageToClient::mutable_obj_message(int index) { // @@protoc_insertion_point(field_mutable:protobuf.MessageToClient.obj_message) - return _internal_mutable_obj_message()->Mutable(index); + return _impl_.obj_message_.Mutable(index); } inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfObj>* MessageToClient::mutable_obj_message() { // @@protoc_insertion_point(field_mutable_list:protobuf.MessageToClient.obj_message) - return _internal_mutable_obj_message(); + return &_impl_.obj_message_; } inline const ::protobuf::MessageOfObj& MessageToClient::_internal_obj_message(int index) const { - return _internal_obj_message().Get(index); + return _impl_.obj_message_.Get(index); } inline const ::protobuf::MessageOfObj& MessageToClient::obj_message(int index) const { @@ -8324,7 +8215,7 @@ namespace protobuf } inline ::protobuf::MessageOfObj* MessageToClient::_internal_add_obj_message() { - return _internal_mutable_obj_message()->Add(); + return _impl_.obj_message_.Add(); } inline ::protobuf::MessageOfObj* MessageToClient::add_obj_message() { @@ -8336,56 +8227,49 @@ namespace protobuf MessageToClient::obj_message() const { // @@protoc_insertion_point(field_list:protobuf.MessageToClient.obj_message) - return _internal_obj_message(); - } - inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfObj>& - MessageToClient::_internal_obj_message() const - { return _impl_.obj_message_; } - inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfObj>* - 
MessageToClient::_internal_mutable_obj_message() - { - return &_impl_.obj_message_; - } // .protobuf.GameState game_state = 2; inline void MessageToClient::clear_game_state() { _impl_.game_state_ = 0; } + inline ::protobuf::GameState MessageToClient::_internal_game_state() const + { + return static_cast<::protobuf::GameState>(_impl_.game_state_); + } inline ::protobuf::GameState MessageToClient::game_state() const { // @@protoc_insertion_point(field_get:protobuf.MessageToClient.game_state) return _internal_game_state(); } + inline void MessageToClient::_internal_set_game_state(::protobuf::GameState value) + { + _impl_.game_state_ = value; + } inline void MessageToClient::set_game_state(::protobuf::GameState value) { _internal_set_game_state(value); // @@protoc_insertion_point(field_set:protobuf.MessageToClient.game_state) } - inline ::protobuf::GameState MessageToClient::_internal_game_state() const - { - return static_cast<::protobuf::GameState>(_impl_.game_state_); - } - inline void MessageToClient::_internal_set_game_state(::protobuf::GameState value) - { - ; - _impl_.game_state_ = value; - } // .protobuf.MessageOfAll all_message = 3; + inline bool MessageToClient::_internal_has_all_message() const + { + return this != internal_default_instance() && _impl_.all_message_ != nullptr; + } inline bool MessageToClient::has_all_message() const { - bool value = (_impl_._has_bits_[0] & 0x00000001u) != 0; - PROTOBUF_ASSUME(!value || _impl_.all_message_ != nullptr); - return value; + return _internal_has_all_message(); } inline void MessageToClient::clear_all_message() { - if (_impl_.all_message_ != nullptr) - _impl_.all_message_->Clear(); - _impl_._has_bits_[0] &= ~0x00000001u; + if (GetArenaForAllocation() == nullptr && _impl_.all_message_ != nullptr) + { + delete _impl_.all_message_; + } + _impl_.all_message_ = nullptr; } inline const ::protobuf::MessageOfAll& MessageToClient::_internal_all_message() const { @@ -8408,17 +8292,14 @@ namespace protobuf _impl_.all_message_ 
= all_message; if (all_message) { - _impl_._has_bits_[0] |= 0x00000001u; } else { - _impl_._has_bits_[0] &= ~0x00000001u; } // @@protoc_insertion_point(field_unsafe_arena_set_allocated:protobuf.MessageToClient.all_message) } inline ::protobuf::MessageOfAll* MessageToClient::release_all_message() { - _impl_._has_bits_[0] &= ~0x00000001u; ::protobuf::MessageOfAll* temp = _impl_.all_message_; _impl_.all_message_ = nullptr; #ifdef PROTOBUF_FORCE_COPY_IN_RELEASE @@ -8439,14 +8320,13 @@ namespace protobuf inline ::protobuf::MessageOfAll* MessageToClient::unsafe_arena_release_all_message() { // @@protoc_insertion_point(field_release:protobuf.MessageToClient.all_message) - _impl_._has_bits_[0] &= ~0x00000001u; + ::protobuf::MessageOfAll* temp = _impl_.all_message_; _impl_.all_message_ = nullptr; return temp; } inline ::protobuf::MessageOfAll* MessageToClient::_internal_mutable_all_message() { - _impl_._has_bits_[0] |= 0x00000001u; if (_impl_.all_message_ == nullptr) { auto* p = CreateMaybeMessage<::protobuf::MessageOfAll>(GetArenaForAllocation()); @@ -8477,11 +8357,9 @@ namespace protobuf message_arena, all_message, submessage_arena ); } - _impl_._has_bits_[0] |= 0x00000001u; } else { - _impl_._has_bits_[0] &= ~0x00000001u; } _impl_.all_message_ = all_message; // @@protoc_insertion_point(field_set_allocated:protobuf.MessageToClient.all_message) @@ -8494,26 +8372,25 @@ namespace protobuf // int64 actual_speed = 1; inline void MoveRes::clear_actual_speed() { - _impl_.actual_speed_ = ::int64_t{0}; + _impl_.actual_speed_ = int64_t{0}; } - inline ::int64_t MoveRes::actual_speed() const + inline int64_t MoveRes::_internal_actual_speed() const { - // @@protoc_insertion_point(field_get:protobuf.MoveRes.actual_speed) - return _internal_actual_speed(); + return _impl_.actual_speed_; } - inline void MoveRes::set_actual_speed(::int64_t value) + inline int64_t MoveRes::actual_speed() const { - _internal_set_actual_speed(value); - // 
@@protoc_insertion_point(field_set:protobuf.MoveRes.actual_speed) + // @@protoc_insertion_point(field_get:protobuf.MoveRes.actual_speed) + return _internal_actual_speed(); } - inline ::int64_t MoveRes::_internal_actual_speed() const + inline void MoveRes::_internal_set_actual_speed(int64_t value) { - return _impl_.actual_speed_; + _impl_.actual_speed_ = value; } - inline void MoveRes::_internal_set_actual_speed(::int64_t value) + inline void MoveRes::set_actual_speed(int64_t value) { - ; - _impl_.actual_speed_ = value; + _internal_set_actual_speed(value); + // @@protoc_insertion_point(field_set:protobuf.MoveRes.actual_speed) } // double actual_angle = 2; @@ -8521,50 +8398,48 @@ namespace protobuf { _impl_.actual_angle_ = 0; } + inline double MoveRes::_internal_actual_angle() const + { + return _impl_.actual_angle_; + } inline double MoveRes::actual_angle() const { // @@protoc_insertion_point(field_get:protobuf.MoveRes.actual_angle) return _internal_actual_angle(); } + inline void MoveRes::_internal_set_actual_angle(double value) + { + _impl_.actual_angle_ = value; + } inline void MoveRes::set_actual_angle(double value) { _internal_set_actual_angle(value); // @@protoc_insertion_point(field_set:protobuf.MoveRes.actual_angle) } - inline double MoveRes::_internal_actual_angle() const - { - return _impl_.actual_angle_; - } - inline void MoveRes::_internal_set_actual_angle(double value) - { - ; - _impl_.actual_angle_ = value; - } // bool act_success = 3; inline void MoveRes::clear_act_success() { _impl_.act_success_ = false; } + inline bool MoveRes::_internal_act_success() const + { + return _impl_.act_success_; + } inline bool MoveRes::act_success() const { // @@protoc_insertion_point(field_get:protobuf.MoveRes.act_success) return _internal_act_success(); } + inline void MoveRes::_internal_set_act_success(bool value) + { + _impl_.act_success_ = value; + } inline void MoveRes::set_act_success(bool value) { _internal_set_act_success(value); // 
@@protoc_insertion_point(field_set:protobuf.MoveRes.act_success) } - inline bool MoveRes::_internal_act_success() const - { - return _impl_.act_success_; - } - inline void MoveRes::_internal_set_act_success(bool value) - { - ; - _impl_.act_success_ = value; - } // ------------------------------------------------------------------- @@ -8575,25 +8450,24 @@ namespace protobuf { _impl_.act_success_ = false; } + inline bool BoolRes::_internal_act_success() const + { + return _impl_.act_success_; + } inline bool BoolRes::act_success() const { // @@protoc_insertion_point(field_get:protobuf.BoolRes.act_success) return _internal_act_success(); } + inline void BoolRes::_internal_set_act_success(bool value) + { + _impl_.act_success_ = value; + } inline void BoolRes::set_act_success(bool value) { _internal_set_act_success(value); // @@protoc_insertion_point(field_set:protobuf.BoolRes.act_success) } - inline bool BoolRes::_internal_act_success() const - { - return _impl_.act_success_; - } - inline void BoolRes::_internal_set_act_success(bool value) - { - ; - _impl_.act_success_ = value; - } // ------------------------------------------------------------------- @@ -8610,22 +8484,22 @@ namespace protobuf } inline void ShipInfoRes::clear_ship_info() { - _internal_mutable_ship_info()->Clear(); + _impl_.ship_info_.Clear(); } inline ::protobuf::MessageOfShip* ShipInfoRes::mutable_ship_info(int index) { // @@protoc_insertion_point(field_mutable:protobuf.ShipInfoRes.ship_info) - return _internal_mutable_ship_info()->Mutable(index); + return _impl_.ship_info_.Mutable(index); } inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfShip>* ShipInfoRes::mutable_ship_info() { // @@protoc_insertion_point(field_mutable_list:protobuf.ShipInfoRes.ship_info) - return _internal_mutable_ship_info(); + return &_impl_.ship_info_; } inline const ::protobuf::MessageOfShip& ShipInfoRes::_internal_ship_info(int index) const { - return _internal_ship_info().Get(index); + return 
_impl_.ship_info_.Get(index); } inline const ::protobuf::MessageOfShip& ShipInfoRes::ship_info(int index) const { @@ -8634,7 +8508,7 @@ namespace protobuf } inline ::protobuf::MessageOfShip* ShipInfoRes::_internal_add_ship_info() { - return _internal_mutable_ship_info()->Add(); + return _impl_.ship_info_.Add(); } inline ::protobuf::MessageOfShip* ShipInfoRes::add_ship_info() { @@ -8646,18 +8520,8 @@ namespace protobuf ShipInfoRes::ship_info() const { // @@protoc_insertion_point(field_list:protobuf.ShipInfoRes.ship_info) - return _internal_ship_info(); - } - inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfShip>& - ShipInfoRes::_internal_ship_info() const - { return _impl_.ship_info_; } - inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<::protobuf::MessageOfShip>* - ShipInfoRes::_internal_mutable_ship_info() - { - return &_impl_.ship_info_; - } // ------------------------------------------------------------------- @@ -8666,26 +8530,25 @@ namespace protobuf // int64 economy = 1; inline void EcoRes::clear_economy() { - _impl_.economy_ = ::int64_t{0}; + _impl_.economy_ = int64_t{0}; } - inline ::int64_t EcoRes::economy() const + inline int64_t EcoRes::_internal_economy() const { - // @@protoc_insertion_point(field_get:protobuf.EcoRes.economy) - return _internal_economy(); + return _impl_.economy_; } - inline void EcoRes::set_economy(::int64_t value) + inline int64_t EcoRes::economy() const { - _internal_set_economy(value); - // @@protoc_insertion_point(field_set:protobuf.EcoRes.economy) + // @@protoc_insertion_point(field_get:protobuf.EcoRes.economy) + return _internal_economy(); } - inline ::int64_t EcoRes::_internal_economy() const + inline void EcoRes::_internal_set_economy(int64_t value) { - return _impl_.economy_; + _impl_.economy_ = value; } - inline void EcoRes::_internal_set_economy(::int64_t value) + inline void EcoRes::set_economy(int64_t value) { - ; - _impl_.economy_ = value; + _internal_set_economy(value); + // 
@@protoc_insertion_point(field_set:protobuf.EcoRes.economy) } // ------------------------------------------------------------------- @@ -8693,17 +8556,21 @@ namespace protobuf // MessageOfNews // string text_message = 1; - inline bool MessageOfNews::has_text_message() const + inline bool MessageOfNews::_internal_has_text_message() const { return news_case() == kTextMessage; } + inline bool MessageOfNews::has_text_message() const + { + return _internal_has_text_message(); + } inline void MessageOfNews::set_has_text_message() { _impl_._oneof_case_[0] = kTextMessage; } inline void MessageOfNews::clear_text_message() { - if (news_case() == kTextMessage) + if (_internal_has_text_message()) { _impl_.news_.text_message_.Destroy(); clear_has_news(); @@ -8714,17 +8581,16 @@ namespace protobuf // @@protoc_insertion_point(field_get:protobuf.MessageOfNews.text_message) return _internal_text_message(); } - template - inline PROTOBUF_ALWAYS_INLINE void MessageOfNews::set_text_message(Arg_&& arg, Args_... args) + template + inline void MessageOfNews::set_text_message(ArgT0&& arg0, ArgT... 
args) { - if (news_case() != kTextMessage) + if (!_internal_has_text_message()) { clear_news(); - set_has_text_message(); _impl_.news_.text_message_.InitDefault(); } - _impl_.news_.text_message_.Set(static_cast(arg), args..., GetArenaForAllocation()); + _impl_.news_.text_message_.Set(static_cast(arg0), args..., GetArenaForAllocation()); // @@protoc_insertion_point(field_set:protobuf.MessageOfNews.text_message) } inline std::string* MessageOfNews::mutable_text_message() @@ -8735,30 +8601,27 @@ namespace protobuf } inline const std::string& MessageOfNews::_internal_text_message() const { - if (news_case() != kTextMessage) + if (_internal_has_text_message()) { - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); + return _impl_.news_.text_message_.Get(); } - return _impl_.news_.text_message_.Get(); + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); } inline void MessageOfNews::_internal_set_text_message(const std::string& value) { - if (news_case() != kTextMessage) + if (!_internal_has_text_message()) { clear_news(); - set_has_text_message(); _impl_.news_.text_message_.InitDefault(); } - _impl_.news_.text_message_.Set(value, GetArenaForAllocation()); } inline std::string* MessageOfNews::_internal_mutable_text_message() { - if (news_case() != kTextMessage) + if (!_internal_has_text_message()) { clear_news(); - set_has_text_message(); _impl_.news_.text_message_.InitDefault(); } @@ -8767,39 +8630,46 @@ namespace protobuf inline std::string* MessageOfNews::release_text_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfNews.text_message) - if (news_case() != kTextMessage) + if (_internal_has_text_message()) + { + clear_has_news(); + return _impl_.news_.text_message_.Release(); + } + else { return nullptr; } - clear_has_news(); - return _impl_.news_.text_message_.Release(); } - inline void MessageOfNews::set_allocated_text_message(std::string* value) + inline void 
MessageOfNews::set_allocated_text_message(std::string* text_message) { if (has_news()) { clear_news(); } - if (value != nullptr) + if (text_message != nullptr) { set_has_text_message(); - _impl_.news_.text_message_.InitAllocated(value, GetArenaForAllocation()); + _impl_.news_.text_message_.InitAllocated(text_message, GetArenaForAllocation()); } // @@protoc_insertion_point(field_set_allocated:protobuf.MessageOfNews.text_message) } // bytes binary_message = 4; - inline bool MessageOfNews::has_binary_message() const + inline bool MessageOfNews::_internal_has_binary_message() const { return news_case() == kBinaryMessage; } + inline bool MessageOfNews::has_binary_message() const + { + return _internal_has_binary_message(); + } inline void MessageOfNews::set_has_binary_message() { _impl_._oneof_case_[0] = kBinaryMessage; } inline void MessageOfNews::clear_binary_message() { - if (news_case() == kBinaryMessage) + if (_internal_has_binary_message()) { _impl_.news_.binary_message_.Destroy(); clear_has_news(); @@ -8810,17 +8680,16 @@ namespace protobuf // @@protoc_insertion_point(field_get:protobuf.MessageOfNews.binary_message) return _internal_binary_message(); } - template - inline PROTOBUF_ALWAYS_INLINE void MessageOfNews::set_binary_message(Arg_&& arg, Args_... args) + template + inline void MessageOfNews::set_binary_message(ArgT0&& arg0, ArgT... 
args) { - if (news_case() != kBinaryMessage) + if (!_internal_has_binary_message()) { clear_news(); - set_has_binary_message(); _impl_.news_.binary_message_.InitDefault(); } - _impl_.news_.binary_message_.SetBytes(static_cast(arg), args..., GetArenaForAllocation()); + _impl_.news_.binary_message_.SetBytes(static_cast(arg0), args..., GetArenaForAllocation()); // @@protoc_insertion_point(field_set:protobuf.MessageOfNews.binary_message) } inline std::string* MessageOfNews::mutable_binary_message() @@ -8831,30 +8700,27 @@ namespace protobuf } inline const std::string& MessageOfNews::_internal_binary_message() const { - if (news_case() != kBinaryMessage) + if (_internal_has_binary_message()) { - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); + return _impl_.news_.binary_message_.Get(); } - return _impl_.news_.binary_message_.Get(); + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); } inline void MessageOfNews::_internal_set_binary_message(const std::string& value) { - if (news_case() != kBinaryMessage) + if (!_internal_has_binary_message()) { clear_news(); - set_has_binary_message(); _impl_.news_.binary_message_.InitDefault(); } - _impl_.news_.binary_message_.Set(value, GetArenaForAllocation()); } inline std::string* MessageOfNews::_internal_mutable_binary_message() { - if (news_case() != kBinaryMessage) + if (!_internal_has_binary_message()) { clear_news(); - set_has_binary_message(); _impl_.news_.binary_message_.InitDefault(); } @@ -8863,23 +8729,26 @@ namespace protobuf inline std::string* MessageOfNews::release_binary_message() { // @@protoc_insertion_point(field_release:protobuf.MessageOfNews.binary_message) - if (news_case() != kBinaryMessage) + if (_internal_has_binary_message()) + { + clear_has_news(); + return _impl_.news_.binary_message_.Release(); + } + else { return nullptr; } - clear_has_news(); - return _impl_.news_.binary_message_.Release(); } - inline void 
MessageOfNews::set_allocated_binary_message(std::string* value) + inline void MessageOfNews::set_allocated_binary_message(std::string* binary_message) { if (has_news()) { clear_news(); } - if (value != nullptr) + if (binary_message != nullptr) { set_has_binary_message(); - _impl_.news_.binary_message_.InitAllocated(value, GetArenaForAllocation()); + _impl_.news_.binary_message_.InitAllocated(binary_message, GetArenaForAllocation()); } // @@protoc_insertion_point(field_set_allocated:protobuf.MessageOfNews.binary_message) } @@ -8887,51 +8756,49 @@ namespace protobuf // int64 from_id = 2; inline void MessageOfNews::clear_from_id() { - _impl_.from_id_ = ::int64_t{0}; + _impl_.from_id_ = int64_t{0}; } - inline ::int64_t MessageOfNews::from_id() const + inline int64_t MessageOfNews::_internal_from_id() const { - // @@protoc_insertion_point(field_get:protobuf.MessageOfNews.from_id) - return _internal_from_id(); + return _impl_.from_id_; } - inline void MessageOfNews::set_from_id(::int64_t value) + inline int64_t MessageOfNews::from_id() const { - _internal_set_from_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfNews.from_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfNews.from_id) + return _internal_from_id(); } - inline ::int64_t MessageOfNews::_internal_from_id() const + inline void MessageOfNews::_internal_set_from_id(int64_t value) { - return _impl_.from_id_; + _impl_.from_id_ = value; } - inline void MessageOfNews::_internal_set_from_id(::int64_t value) + inline void MessageOfNews::set_from_id(int64_t value) { - ; - _impl_.from_id_ = value; + _internal_set_from_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfNews.from_id) } // int64 to_id = 3; inline void MessageOfNews::clear_to_id() { - _impl_.to_id_ = ::int64_t{0}; + _impl_.to_id_ = int64_t{0}; } - inline ::int64_t MessageOfNews::to_id() const + inline int64_t MessageOfNews::_internal_to_id() const { - // 
@@protoc_insertion_point(field_get:protobuf.MessageOfNews.to_id) - return _internal_to_id(); + return _impl_.to_id_; } - inline void MessageOfNews::set_to_id(::int64_t value) + inline int64_t MessageOfNews::to_id() const { - _internal_set_to_id(value); - // @@protoc_insertion_point(field_set:protobuf.MessageOfNews.to_id) + // @@protoc_insertion_point(field_get:protobuf.MessageOfNews.to_id) + return _internal_to_id(); } - inline ::int64_t MessageOfNews::_internal_to_id() const + inline void MessageOfNews::_internal_set_to_id(int64_t value) { - return _impl_.to_id_; + _impl_.to_id_ = value; } - inline void MessageOfNews::_internal_set_to_id(::int64_t value) + inline void MessageOfNews::set_to_id(int64_t value) { - ; - _impl_.to_id_ = value; + _internal_set_to_id(value); + // @@protoc_insertion_point(field_set:protobuf.MessageOfNews.to_id) } inline bool MessageOfNews::has_news() const @@ -8949,12 +8816,49 @@ namespace protobuf #ifdef __GNUC__ #pragma GCC diagnostic pop #endif // __GNUC__ + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // 
------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) + } // namespace protobuf // @@protoc_insertion_point(global_scope) -#include "google/protobuf/port_undef.inc" - -#endif // GOOGLE_PROTOBUF_INCLUDED_Message2Clients_2eproto_2epb_2eh +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_Message2Clients_2eproto diff --git a/CAPI/cpp/proto/Message2Server.pb.cc b/CAPI/cpp/proto/Message2Server.pb.cc index dd2f9d7d..de7eb835 100644 --- a/CAPI/cpp/proto/Message2Server.pb.cc +++ b/CAPI/cpp/proto/Message2Server.pb.cc @@ -4,23 +4,24 @@ #include "Message2Server.pb.h" #include -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/extension_set.h" -#include "google/protobuf/wire_format_lite.h" -#include "google/protobuf/descriptor.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/reflection_ops.h" -#include "google/protobuf/wire_format.h" + +#include +#include +#include +#include +#include +#include +#include // @@protoc_insertion_point(includes) +#include -// Must be included last. 
-#include "google/protobuf/port_def.inc" PROTOBUF_PRAGMA_INIT_SEG + namespace _pb = ::PROTOBUF_NAMESPACE_ID; -namespace _pbi = ::PROTOBUF_NAMESPACE_ID::internal; +namespace _pbi = _pb::internal; + namespace protobuf { - template PROTOBUF_CONSTEXPR NullRequest::NullRequest( ::_pbi::ConstantInitialized ) @@ -40,21 +41,12 @@ namespace protobuf NullRequest _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 NullRequestDefaultTypeInternal _NullRequest_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 NullRequestDefaultTypeInternal _NullRequest_default_instance_; PROTOBUF_CONSTEXPR IDMsg::IDMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_._cached_size_)*/ {}} { } struct IDMsgDefaultTypeInternal @@ -71,30 +63,12 @@ namespace protobuf IDMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 IDMsgDefaultTypeInternal _IDMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 IDMsgDefaultTypeInternal _IDMsg_default_instance_; PROTOBUF_CONSTEXPR PlayerMsg::PlayerMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.ship_type_)*/ 0 - - , - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.ship_type_)*/ 0, /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct 
PlayerMsgDefaultTypeInternal @@ -111,27 +85,12 @@ namespace protobuf PlayerMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 PlayerMsgDefaultTypeInternal _PlayerMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 PlayerMsgDefaultTypeInternal _PlayerMsg_default_instance_; PROTOBUF_CONSTEXPR MoveMsg::MoveMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.angle_)*/ 0 - - , - /*decltype(_impl_.time_in_milliseconds_)*/ ::int64_t{0} - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.angle_)*/ 0, /*decltype(_impl_.time_in_milliseconds_)*/ int64_t{0}, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_._cached_size_)*/ {}} { } struct MoveMsgDefaultTypeInternal @@ -148,26 +107,12 @@ namespace protobuf MoveMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MoveMsgDefaultTypeInternal _MoveMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 MoveMsgDefaultTypeInternal _MoveMsg_default_instance_; PROTOBUF_CONSTEXPR SendMsg::SendMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.to_player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.message_)*/ {}, - /*decltype(_impl_._cached_size_)*/ {}, - /*decltype(_impl_._oneof_case_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.to_player_id_)*/ int64_t{0}, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.message_)*/ {}, /*decltype(_impl_._cached_size_)*/ {}, /*decltype(_impl_._oneof_case_)*/ {}} { } struct SendMsgDefaultTypeInternal @@ -184,24 +129,12 @@ 
namespace protobuf SendMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SendMsgDefaultTypeInternal _SendMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 SendMsgDefaultTypeInternal _SendMsg_default_instance_; PROTOBUF_CONSTEXPR AttackMsg::AttackMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.angle_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.angle_)*/ 0, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_._cached_size_)*/ {}} { } struct AttackMsgDefaultTypeInternal @@ -218,24 +151,12 @@ namespace protobuf AttackMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 AttackMsgDefaultTypeInternal _AttackMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 AttackMsgDefaultTypeInternal _AttackMsg_default_instance_; PROTOBUF_CONSTEXPR ConstructMsg::ConstructMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.construction_type_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.construction_type_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct ConstructMsgDefaultTypeInternal @@ -252,24 +173,12 @@ namespace protobuf ConstructMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 ConstructMsgDefaultTypeInternal _ConstructMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 
ConstructMsgDefaultTypeInternal _ConstructMsg_default_instance_; PROTOBUF_CONSTEXPR RecoverMsg::RecoverMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.recover_)*/ ::int64_t{0} - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.recover_)*/ int64_t{0}, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_._cached_size_)*/ {}} { } struct RecoverMsgDefaultTypeInternal @@ -286,24 +195,12 @@ namespace protobuf RecoverMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RecoverMsgDefaultTypeInternal _RecoverMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 RecoverMsgDefaultTypeInternal _RecoverMsg_default_instance_; PROTOBUF_CONSTEXPR InstallMsg::InstallMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.player_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - /*decltype(_impl_.module_type_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.player_id_)*/ int64_t{0}, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.module_type_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct InstallMsgDefaultTypeInternal @@ -320,27 +217,12 @@ namespace protobuf InstallMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 InstallMsgDefaultTypeInternal _InstallMsg_default_instance_; - template + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 InstallMsgDefaultTypeInternal _InstallMsg_default_instance_; PROTOBUF_CONSTEXPR BuildShipMsg::BuildShipMsg( ::_pbi::ConstantInitialized ) : _impl_{ - /*decltype(_impl_.x_)*/ 0 - - , - /*decltype(_impl_.y_)*/ 0 - - , - /*decltype(_impl_.team_id_)*/ ::int64_t{0} - - , - 
/*decltype(_impl_.ship_type_)*/ 0 - - , - /*decltype(_impl_._cached_size_)*/ {}} + /*decltype(_impl_.x_)*/ 0, /*decltype(_impl_.y_)*/ 0, /*decltype(_impl_.team_id_)*/ int64_t{0}, /*decltype(_impl_.ship_type_)*/ 0, /*decltype(_impl_._cached_size_)*/ {}} { } struct BuildShipMsgDefaultTypeInternal @@ -357,34 +239,25 @@ namespace protobuf BuildShipMsg _instance; }; }; - - PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT - PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 BuildShipMsgDefaultTypeInternal _BuildShipMsg_default_instance_; + PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 BuildShipMsgDefaultTypeInternal _BuildShipMsg_default_instance_; } // namespace protobuf static ::_pb::Metadata file_level_metadata_Message2Server_2eproto[10]; -static constexpr const ::_pb::EnumDescriptor** - file_level_enum_descriptors_Message2Server_2eproto = nullptr; -static constexpr const ::_pb::ServiceDescriptor** - file_level_service_descriptors_Message2Server_2eproto = nullptr; -const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE( - protodesc_cold -) = { +static constexpr ::_pb::EnumDescriptor const** file_level_enum_descriptors_Message2Server_2eproto = nullptr; +static constexpr ::_pb::ServiceDescriptor const** file_level_service_descriptors_Message2Server_2eproto = nullptr; + +const uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::NullRequest, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::protobuf::IDMsg, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::IDMsg, 
_impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::IDMsg, _impl_.team_id_), ~0u, // no _has_bits_ @@ -393,8 +266,6 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::PlayerMsg, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::PlayerMsg, _impl_.team_id_), PROTOBUF_FIELD_OFFSET(::protobuf::PlayerMsg, _impl_.ship_type_), @@ -406,8 +277,6 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::MoveMsg, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::MoveMsg, _impl_.angle_), PROTOBUF_FIELD_OFFSET(::protobuf::MoveMsg, _impl_.time_in_milliseconds_), @@ -418,8 +287,6 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ PROTOBUF_FIELD_OFFSET(::protobuf::SendMsg, _impl_._oneof_case_[0]), ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::SendMsg, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::SendMsg, _impl_.to_player_id_), ::_pbi::kInvalidFieldOffsetTag, @@ -432,8 +299,6 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::AttackMsg, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::AttackMsg, _impl_.angle_), PROTOBUF_FIELD_OFFSET(::protobuf::AttackMsg, _impl_.team_id_), @@ -443,8 +308,6 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no 
_inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::ConstructMsg, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::ConstructMsg, _impl_.construction_type_), PROTOBUF_FIELD_OFFSET(::protobuf::ConstructMsg, _impl_.team_id_), @@ -454,8 +317,6 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::RecoverMsg, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::RecoverMsg, _impl_.recover_), PROTOBUF_FIELD_OFFSET(::protobuf::RecoverMsg, _impl_.team_id_), @@ -465,8 +326,6 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::InstallMsg, _impl_.module_type_), PROTOBUF_FIELD_OFFSET(::protobuf::InstallMsg, _impl_.player_id_), PROTOBUF_FIELD_OFFSET(::protobuf::InstallMsg, _impl_.team_id_), @@ -476,26 +335,22 @@ const ::uint32_t TableStruct_Message2Server_2eproto::offsets[] PROTOBUF_SECTION_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ ~0u, // no _inlined_string_donated_ - ~0u, // no _split_ - ~0u, // no sizeof(Split) PROTOBUF_FIELD_OFFSET(::protobuf::BuildShipMsg, _impl_.x_), PROTOBUF_FIELD_OFFSET(::protobuf::BuildShipMsg, _impl_.y_), PROTOBUF_FIELD_OFFSET(::protobuf::BuildShipMsg, _impl_.ship_type_), PROTOBUF_FIELD_OFFSET(::protobuf::BuildShipMsg, _impl_.team_id_), }; - -static const ::_pbi::MigrationSchema - schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { - {0, -1, -1, sizeof(::protobuf::NullRequest)}, - {8, -1, -1, sizeof(::protobuf::IDMsg)}, - {18, -1, -1, sizeof(::protobuf::PlayerMsg)}, - {31, -1, -1, sizeof(::protobuf::MoveMsg)}, - {43, -1, -1, sizeof(::protobuf::SendMsg)}, - {57, -1, -1, 
sizeof(::protobuf::AttackMsg)}, - {68, -1, -1, sizeof(::protobuf::ConstructMsg)}, - {79, -1, -1, sizeof(::protobuf::RecoverMsg)}, - {90, -1, -1, sizeof(::protobuf::InstallMsg)}, - {101, -1, -1, sizeof(::protobuf::BuildShipMsg)}, +static const ::_pbi::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + {0, -1, -1, sizeof(::protobuf::NullRequest)}, + {6, -1, -1, sizeof(::protobuf::IDMsg)}, + {14, -1, -1, sizeof(::protobuf::PlayerMsg)}, + {25, -1, -1, sizeof(::protobuf::MoveMsg)}, + {35, -1, -1, sizeof(::protobuf::SendMsg)}, + {47, -1, -1, sizeof(::protobuf::AttackMsg)}, + {56, -1, -1, sizeof(::protobuf::ConstructMsg)}, + {65, -1, -1, sizeof(::protobuf::RecoverMsg)}, + {74, -1, -1, sizeof(::protobuf::InstallMsg)}, + {83, -1, -1, sizeof(::protobuf::BuildShipMsg)}, }; static const ::_pb::Message* const file_default_instances[] = { @@ -510,7 +365,8 @@ static const ::_pb::Message* const file_default_instances[] = { &::protobuf::_InstallMsg_default_instance_._instance, &::protobuf::_BuildShipMsg_default_instance_._instance, }; -const char descriptor_table_protodef_Message2Server_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + +const char descriptor_table_protodef_Message2Server_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\024Message2Server.proto\022\010protobuf\032\021Messag" "eType.proto\"\r\n\013NullRequest\"+\n\005IDMsg\022\021\n\tp" "layer_id\030\001 \001(\003\022\017\n\007team_id\030\002 \001(\003\"l\n\tPlaye" @@ -532,12 +388,11 @@ const char descriptor_table_protodef_Message2Server_2eproto[] PROTOBUF_SECTION_V "protobuf.ModuleType\022\021\n\tplayer_id\030\002 \001(\003\022\017" "\n\007team_id\030\003 \001(\003\"\\\n\014BuildShipMsg\022\t\n\001x\030\001 \001" "(\005\022\t\n\001y\030\002 \001(\005\022%\n\tship_type\030\003 \001(\0162\022.proto" - "buf.ShipType\022\017\n\007team_id\030\004 \001(\003b\006proto3"}; -static const ::_pbi::DescriptorTable* const descriptor_table_Message2Server_2eproto_deps[1] = - { - 
&::descriptor_table_MessageType_2eproto, + "buf.ShipType\022\017\n\007team_id\030\004 \001(\003b\006proto3"; +static const ::_pbi::DescriptorTable* const descriptor_table_Message2Server_2eproto_deps[1] = { + &::descriptor_table_MessageType_2eproto, }; -static ::absl::once_flag descriptor_table_Message2Server_2eproto_once; +static ::_pbi::once_flag descriptor_table_Message2Server_2eproto_once; const ::_pbi::DescriptorTable descriptor_table_Message2Server_2eproto = { false, false, @@ -555,27 +410,16 @@ const ::_pbi::DescriptorTable descriptor_table_Message2Server_2eproto = { file_level_enum_descriptors_Message2Server_2eproto, file_level_service_descriptors_Message2Server_2eproto, }; - -// This function exists to be marked as weak. -// It can significantly speed up compilation by breaking up LLVM's SCC -// in the .pb.cc translation units. Large translation units see a -// reduction of more than 35% of walltime for optimized builds. Without -// the weak attribute all the messages in the file, including all the -// vtables and everything they use become part of the same SCC through -// a cycle like: -// GetMetadata -> descriptor table -> default instances -> -// vtables -> GetMetadata -// By adding a weak function here we break the connection from the -// individual vtables back into the descriptor table. PROTOBUF_ATTRIBUTE_WEAK const ::_pbi::DescriptorTable* descriptor_table_Message2Server_2eproto_getter() { return &descriptor_table_Message2Server_2eproto; } + // Force running AddDescriptors() at dynamic initialization time. 
-PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 -static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_Message2Server_2eproto(&descriptor_table_Message2Server_2eproto); +PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_Message2Server_2eproto(&descriptor_table_Message2Server_2eproto); namespace protobuf { + // =================================================================== class NullRequest::_Internal @@ -583,8 +427,8 @@ namespace protobuf public: }; - NullRequest::NullRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase(arena) + NullRequest::NullRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase(arena, is_message_owned) { // @@protoc_insertion_point(arena_constructor:protobuf.NullRequest) } @@ -612,6 +456,7 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[0] ); } + // =================================================================== class IDMsg::_Internal @@ -619,33 +464,33 @@ namespace protobuf public: }; - IDMsg::IDMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + IDMsg::IDMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.IDMsg) } IDMsg::IDMsg(const IDMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + IDMsg* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.player_id_){}, decltype(_impl_.team_id_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); // @@protoc_insertion_point(copy_constructor:protobuf.IDMsg) } - inline void IDMsg::SharedCtor(::_pb::Arena* arena) + inline void IDMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.team_id_){int64_t{0}}, /*decltype(_impl_._cached_size_)*/ {}}; } IDMsg::~IDMsg() @@ -661,7 +506,7 @@ namespace protobuf inline void IDMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void IDMsg::SetCachedSize(int size) const @@ -672,11 +517,11 @@ namespace protobuf void IDMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.IDMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); + ::memset(&_impl_.player_id_, 0, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -687,33 +532,29 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 player_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if 
(PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -741,30 +582,26 @@ namespace protobuf #undef CHK_ } - ::uint8_t* IDMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* IDMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.IDMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_player_id(), target); } // int64 team_id = 2; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 2, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -777,29 +614,25 @@ namespace protobuf return target; } - ::size_t IDMsg::ByteSizeLong() const + size_t IDMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.IDMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused 
(void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int64 team_id = 2; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -818,8 +651,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.IDMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) @@ -864,6 +697,7 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[1] ); } + // =================================================================== class PlayerMsg::_Internal @@ -871,42 +705,33 @@ namespace protobuf public: }; - PlayerMsg::PlayerMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + PlayerMsg::PlayerMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.PlayerMsg) } PlayerMsg::PlayerMsg(const PlayerMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + PlayerMsg* const _this = this; + (void)_this; + new 
(&_impl_) Impl_{ + decltype(_impl_.player_id_){}, decltype(_impl_.team_id_){}, decltype(_impl_.ship_type_){}, decltype(_impl_.x_){}, decltype(_impl_.y_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.y_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.y_)); // @@protoc_insertion_point(copy_constructor:protobuf.PlayerMsg) } - inline void PlayerMsg::SharedCtor(::_pb::Arena* arena) + inline void PlayerMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.ship_type_){0} - - , - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.ship_type_){0}, decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } PlayerMsg::~PlayerMsg() @@ -922,7 +747,7 @@ namespace protobuf inline void PlayerMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void PlayerMsg::SetCachedSize(int size) const @@ -933,11 +758,11 @@ namespace protobuf void PlayerMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.PlayerMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.y_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.y_)); + ::memset(&_impl_.player_id_, 0, static_cast(reinterpret_cast(&_impl_.y_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.y_)); 
_internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -948,70 +773,60 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 player_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.ShipType ship_type = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_ship_type(static_cast<::protobuf::ShipType>(val)); } else - { goto handle_unusual; - } continue; // int32 x = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 40)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -1039,30 +854,26 @@ namespace protobuf #undef CHK_ } - ::uint8_t* PlayerMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* 
PlayerMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.PlayerMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_player_id(), target); } // int64 team_id = 2; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 2, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_team_id(), target); } // .protobuf.ShipType ship_type = 3; @@ -1078,18 +889,14 @@ namespace protobuf if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 4, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(4, this->_internal_x(), target); } // int32 y = 5; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 5, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(5, this->_internal_y(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -1102,29 +909,25 @@ namespace protobuf return target; } - ::size_t PlayerMsg::ByteSizeLong() const + size_t PlayerMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.PlayerMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 player_id = 1; if 
(this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int64 team_id = 2; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // .protobuf.ShipType ship_type = 3; @@ -1137,17 +940,13 @@ namespace protobuf // int32 x = 4; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 5; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -1166,8 +965,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.PlayerMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) @@ -1224,6 +1023,7 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[2] ); } + // =================================================================== class MoveMsg::_Internal @@ -1231,39 +1031,33 @@ namespace protobuf public: }; - MoveMsg::MoveMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + MoveMsg::MoveMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, 
is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.MoveMsg) } MoveMsg::MoveMsg(const MoveMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + MoveMsg* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.player_id_){}, decltype(_impl_.angle_){}, decltype(_impl_.time_in_milliseconds_){}, decltype(_impl_.team_id_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); // @@protoc_insertion_point(copy_constructor:protobuf.MoveMsg) } - inline void MoveMsg::SharedCtor(::_pb::Arena* arena) + inline void MoveMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.angle_){0} - - , - decltype(_impl_.time_in_milliseconds_){::int64_t{0}} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.angle_){0}, decltype(_impl_.time_in_milliseconds_){int64_t{0}}, decltype(_impl_.team_id_){int64_t{0}}, /*decltype(_impl_._cached_size_)*/ {}}; } MoveMsg::~MoveMsg() @@ -1279,7 +1073,7 @@ namespace protobuf inline void MoveMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void MoveMsg::SetCachedSize(int size) const @@ -1290,11 +1084,11 @@ namespace protobuf void MoveMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.MoveMsg) - 
::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); + ::memset(&_impl_.player_id_, 0, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -1305,57 +1099,49 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 player_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // double angle = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 17)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 17)) { _impl_.angle_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; // int64 time_in_milliseconds = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.time_in_milliseconds_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -1383,52 +1169,44 @@ namespace protobuf #undef CHK_ } - ::uint8_t* MoveMsg::_InternalSerialize( - ::uint8_t* 
target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* MoveMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.MoveMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_player_id(), target); } // double angle = 2; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_angle = this->_internal_angle(); - ::uint64_t raw_angle; + uint64_t raw_angle; memcpy(&raw_angle, &tmp_angle, sizeof(tmp_angle)); if (raw_angle != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 2, this->_internal_angle(), target - ); + target = ::_pbi::WireFormatLite::WriteDoubleToArray(2, this->_internal_angle(), target); } // int64 time_in_milliseconds = 3; if (this->_internal_time_in_milliseconds() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 3, this->_internal_time_in_milliseconds(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_time_in_milliseconds(), target); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 4, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -1441,47 
+1219,41 @@ namespace protobuf return target; } - ::size_t MoveMsg::ByteSizeLong() const + size_t MoveMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.MoveMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // double angle = 2; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_angle = this->_internal_angle(); - ::uint64_t raw_angle; + uint64_t raw_angle; memcpy(&raw_angle, &tmp_angle, sizeof(tmp_angle)); if (raw_angle != 0) { - total_size += 9; + total_size += 1 + 8; } // int64 time_in_milliseconds = 3; if (this->_internal_time_in_milliseconds() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_time_in_milliseconds() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_time_in_milliseconds()); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -1500,17 +1272,17 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.MoveMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); 
+ uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) { _this->_internal_set_player_id(from._internal_player_id()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_angle = from._internal_angle(); - ::uint64_t raw_angle; + uint64_t raw_angle; memcpy(&raw_angle, &tmp_angle, sizeof(tmp_angle)); if (raw_angle != 0) { @@ -1558,19 +1330,18 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[3] ); } + // =================================================================== class SendMsg::_Internal { public: - static constexpr ::int32_t kOneofCaseOffset = - PROTOBUF_FIELD_OFFSET(::protobuf::SendMsg, _impl_._oneof_case_); }; - SendMsg::SendMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + SendMsg::SendMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.SendMsg) } SendMsg::SendMsg(const SendMsg& from) : @@ -1579,21 +1350,10 @@ namespace protobuf SendMsg* const _this = this; (void)_this; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){} - - , - decltype(_impl_.to_player_id_){} - - , - decltype(_impl_.team_id_){} - - , - decltype(_impl_.message_){}, - /*decltype(_impl_._cached_size_)*/ {}, - /*decltype(_impl_._oneof_case_)*/ {}}; + decltype(_impl_.player_id_){}, decltype(_impl_.to_player_id_){}, decltype(_impl_.team_id_){}, decltype(_impl_.message_){}, /*decltype(_impl_._cached_size_)*/ {}, /*decltype(_impl_._oneof_case_)*/ {}}; 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); - ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast<::size_t>(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); clear_has_message(); switch (from.message_case()) { @@ -1615,22 +1375,14 @@ namespace protobuf // @@protoc_insertion_point(copy_constructor:protobuf.SendMsg) } - inline void SendMsg::SharedCtor(::_pb::Arena* arena) + inline void SendMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.to_player_id_){::int64_t{0}} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.message_){}, - /*decltype(_impl_._cached_size_)*/ {}, - /*decltype(_impl_._oneof_case_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.to_player_id_){int64_t{0}}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.message_){}, /*decltype(_impl_._cached_size_)*/ {}, /*decltype(_impl_._oneof_case_)*/ {}}; clear_has_message(); } @@ -1647,7 +1399,7 @@ namespace protobuf inline void SendMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); if (has_message()) { clear_message(); @@ -1685,11 +1437,11 @@ namespace protobuf void SendMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.SendMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); + 
::memset(&_impl_.player_id_, 0, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); clear_message(); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -1701,37 +1453,33 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 player_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 to_player_id = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.to_player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // string text_message = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 26)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 26)) { auto str = _internal_mutable_text_message(); ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx); @@ -1739,34 +1487,28 @@ namespace protobuf CHK_(::_pbi::VerifyUTF8(str, "protobuf.SendMsg.text_message")); } else - { goto handle_unusual; - } continue; // bytes binary_message = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 34)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 34)) { auto str = _internal_mutable_binary_message(); ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 5; case 5: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 40)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 40)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto 
handle_unusual; @@ -1794,58 +1536,52 @@ namespace protobuf #undef CHK_ } - ::uint8_t* SendMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* SendMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.SendMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_player_id(), target); } // int64 to_player_id = 2; if (this->_internal_to_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 2, this->_internal_to_player_id(), target + target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_to_player_id(), target); + } + + // string text_message = 3; + if (_internal_has_text_message()) + { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->_internal_text_message().data(), static_cast(this->_internal_text_message().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, "protobuf.SendMsg.text_message" + ); + target = stream->WriteStringMaybeAliased( + 3, this->_internal_text_message(), target ); } - switch (message_case()) + // bytes binary_message = 4; + if (_internal_has_binary_message()) { - case kTextMessage: - { - const std::string& _s = this->_internal_text_message(); - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( - _s.data(), static_cast(_s.length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, "protobuf.SendMsg.text_message" - ); - target = stream->WriteStringMaybeAliased(3, _s, target); - break; - } - case kBinaryMessage: - 
{ - const std::string& _s = this->_internal_binary_message(); - target = stream->WriteBytesMaybeAliased(4, _s, target); - break; - } - default:; + target = stream->WriteBytesMaybeAliased( + 4, this->_internal_binary_message(), target + ); } + // int64 team_id = 5; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 5, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(5, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -1858,37 +1594,31 @@ namespace protobuf return target; } - ::size_t SendMsg::ByteSizeLong() const + size_t SendMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.SendMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int64 to_player_id = 2; if (this->_internal_to_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_to_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_to_player_id()); } // int64 team_id = 5; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } switch (message_case()) @@ -1896,17 +1626,19 @@ namespace protobuf // string text_message = 3; case kTextMessage: { - total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( - this->_internal_text_message() - ); 
+ total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->_internal_text_message() + ); break; } // bytes binary_message = 4; case kBinaryMessage: { - total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - this->_internal_binary_message() - ); + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( + this->_internal_binary_message() + ); break; } case MESSAGE_NOT_SET: @@ -1930,8 +1662,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.SendMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) @@ -1999,6 +1731,7 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[4] ); } + // =================================================================== class AttackMsg::_Internal @@ -2006,36 +1739,33 @@ namespace protobuf public: }; - AttackMsg::AttackMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + AttackMsg::AttackMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.AttackMsg) } AttackMsg::AttackMsg(const AttackMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + AttackMsg* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.player_id_){}, decltype(_impl_.angle_){}, decltype(_impl_.team_id_){}, 
/*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); // @@protoc_insertion_point(copy_constructor:protobuf.AttackMsg) } - inline void AttackMsg::SharedCtor(::_pb::Arena* arena) + inline void AttackMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.angle_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.angle_){0}, decltype(_impl_.team_id_){int64_t{0}}, /*decltype(_impl_._cached_size_)*/ {}}; } AttackMsg::~AttackMsg() @@ -2051,7 +1781,7 @@ namespace protobuf inline void AttackMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void AttackMsg::SetCachedSize(int size) const @@ -2062,11 +1792,11 @@ namespace protobuf void AttackMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.AttackMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); + ::memset(&_impl_.player_id_, 0, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -2077,45 +1807,39 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch 
(tag >> 3) { // int64 player_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // double angle = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 17)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 17)) { _impl_.angle_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad(ptr); ptr += sizeof(double); } else - { goto handle_unusual; - } continue; // int64 team_id = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -2143,43 +1867,37 @@ namespace protobuf #undef CHK_ } - ::uint8_t* AttackMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* AttackMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.AttackMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_player_id(), target); } // double angle = 2; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_angle = this->_internal_angle(); - ::uint64_t raw_angle; + uint64_t raw_angle; 
memcpy(&raw_angle, &tmp_angle, sizeof(tmp_angle)); if (raw_angle != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteDoubleToArray( - 2, this->_internal_angle(), target - ); + target = ::_pbi::WireFormatLite::WriteDoubleToArray(2, this->_internal_angle(), target); } // int64 team_id = 3; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 3, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -2192,39 +1910,35 @@ namespace protobuf return target; } - ::size_t AttackMsg::ByteSizeLong() const + size_t AttackMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.AttackMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // double angle = 2; - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_angle = this->_internal_angle(); - ::uint64_t raw_angle; + uint64_t raw_angle; memcpy(&raw_angle, &tmp_angle, sizeof(tmp_angle)); if (raw_angle != 0) { - total_size += 9; + total_size += 1 + 8; } // int64 team_id = 3; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += 
::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -2243,17 +1957,17 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.AttackMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) { _this->_internal_set_player_id(from._internal_player_id()); } - static_assert(sizeof(::uint64_t) == sizeof(double), "Code assumes ::uint64_t and double are the same size."); + static_assert(sizeof(uint64_t) == sizeof(double), "Code assumes uint64_t and double are the same size."); double tmp_angle = from._internal_angle(); - ::uint64_t raw_angle; + uint64_t raw_angle; memcpy(&raw_angle, &tmp_angle, sizeof(tmp_angle)); if (raw_angle != 0) { @@ -2297,6 +2011,7 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[5] ); } + // =================================================================== class ConstructMsg::_Internal @@ -2304,36 +2019,33 @@ namespace protobuf public: }; - ConstructMsg::ConstructMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + ConstructMsg::ConstructMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.ConstructMsg) } ConstructMsg::ConstructMsg(const ConstructMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + 
ConstructMsg* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.player_id_){}, decltype(_impl_.team_id_){}, decltype(_impl_.construction_type_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.construction_type_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.construction_type_)); // @@protoc_insertion_point(copy_constructor:protobuf.ConstructMsg) } - inline void ConstructMsg::SharedCtor(::_pb::Arena* arena) + inline void ConstructMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.construction_type_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.construction_type_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } ConstructMsg::~ConstructMsg() @@ -2349,7 +2061,7 @@ namespace protobuf inline void ConstructMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void ConstructMsg::SetCachedSize(int size) const @@ -2360,11 +2072,11 @@ namespace protobuf void ConstructMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.ConstructMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.construction_type_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.construction_type_)); + ::memset(&_impl_.player_id_, 0, static_cast(reinterpret_cast(&_impl_.construction_type_) - 
reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.construction_type_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -2375,46 +2087,40 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 player_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.ConstructionType construction_type = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_construction_type(static_cast<::protobuf::ConstructionType>(val)); } else - { goto handle_unusual; - } continue; // int64 team_id = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -2442,21 +2148,19 @@ namespace protobuf #undef CHK_ } - ::uint8_t* ConstructMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* ConstructMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.ConstructMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = 
::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_player_id(), target); } // .protobuf.ConstructionType construction_type = 2; @@ -2472,9 +2176,7 @@ namespace protobuf if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 3, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -2487,29 +2189,25 @@ namespace protobuf return target; } - ::size_t ConstructMsg::ByteSizeLong() const + size_t ConstructMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.ConstructMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int64 team_id = 3; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // .protobuf.ConstructionType construction_type = 2; @@ -2535,8 +2233,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.ConstructMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) @@ -2585,6 +2283,7 
@@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[6] ); } + // =================================================================== class RecoverMsg::_Internal @@ -2592,36 +2291,33 @@ namespace protobuf public: }; - RecoverMsg::RecoverMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + RecoverMsg::RecoverMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.RecoverMsg) } RecoverMsg::RecoverMsg(const RecoverMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + RecoverMsg* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.player_id_){}, decltype(_impl_.recover_){}, decltype(_impl_.team_id_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); // @@protoc_insertion_point(copy_constructor:protobuf.RecoverMsg) } - inline void RecoverMsg::SharedCtor(::_pb::Arena* arena) + inline void RecoverMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.recover_){::int64_t{0}} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.recover_){int64_t{0}}, 
decltype(_impl_.team_id_){int64_t{0}}, /*decltype(_impl_._cached_size_)*/ {}}; } RecoverMsg::~RecoverMsg() @@ -2637,7 +2333,7 @@ namespace protobuf inline void RecoverMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void RecoverMsg::SetCachedSize(int size) const @@ -2648,11 +2344,11 @@ namespace protobuf void RecoverMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.RecoverMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); + ::memset(&_impl_.player_id_, 0, static_cast(reinterpret_cast(&_impl_.team_id_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.team_id_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -2663,45 +2359,39 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int64 player_id = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 recover = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.recover_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto 
handle_unusual; - } continue; default: goto handle_unusual; @@ -2729,39 +2419,33 @@ namespace protobuf #undef CHK_ } - ::uint8_t* RecoverMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* RecoverMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.RecoverMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 1, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(1, this->_internal_player_id(), target); } // int64 recover = 2; if (this->_internal_recover() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 2, this->_internal_recover(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_recover(), target); } // int64 team_id = 3; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 3, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -2774,37 +2458,31 @@ namespace protobuf return target; } - ::size_t RecoverMsg::ByteSizeLong() const + size_t RecoverMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.RecoverMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 player_id = 1; if (this->_internal_player_id() != 0) { - total_size += 
::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int64 recover = 2; if (this->_internal_recover() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_recover() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_recover()); } // int64 team_id = 3; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_); @@ -2823,8 +2501,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.RecoverMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) @@ -2873,6 +2551,7 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[7] ); } + // =================================================================== class InstallMsg::_Internal @@ -2880,36 +2559,33 @@ namespace protobuf public: }; - InstallMsg::InstallMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + InstallMsg::InstallMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.InstallMsg) } InstallMsg::InstallMsg(const InstallMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - 
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + InstallMsg* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.player_id_){}, decltype(_impl_.team_id_){}, decltype(_impl_.module_type_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.player_id_, &from._impl_.player_id_, static_cast(reinterpret_cast(&_impl_.module_type_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.module_type_)); // @@protoc_insertion_point(copy_constructor:protobuf.InstallMsg) } - inline void InstallMsg::SharedCtor(::_pb::Arena* arena) + inline void InstallMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.player_id_){::int64_t{0}} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.module_type_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.player_id_){int64_t{0}}, decltype(_impl_.team_id_){int64_t{0}}, decltype(_impl_.module_type_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } InstallMsg::~InstallMsg() @@ -2925,7 +2601,7 @@ namespace protobuf inline void InstallMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void InstallMsg::SetCachedSize(int size) const @@ -2936,11 +2612,11 @@ namespace protobuf void InstallMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.InstallMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.player_id_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.module_type_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.module_type_)); + ::memset(&_impl_.player_id_, 0, 
static_cast(reinterpret_cast(&_impl_.module_type_) - reinterpret_cast(&_impl_.player_id_)) + sizeof(_impl_.module_type_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -2951,46 +2627,40 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // .protobuf.ModuleType module_type = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_module_type(static_cast<::protobuf::ModuleType>(val)); } else - { goto handle_unusual; - } continue; // int64 player_id = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.player_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int64 team_id = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -3018,12 +2688,12 @@ namespace protobuf #undef CHK_ } - ::uint8_t* InstallMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* InstallMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.InstallMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // .protobuf.ModuleType module_type = 1; @@ -3039,18 +2709,14 @@ namespace protobuf if (this->_internal_player_id() != 0) { target 
= stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 2, this->_internal_player_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(2, this->_internal_player_id(), target); } // int64 team_id = 3; if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 3, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(3, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -3063,29 +2729,25 @@ namespace protobuf return target; } - ::size_t InstallMsg::ByteSizeLong() const + size_t InstallMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.InstallMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int64 player_id = 2; if (this->_internal_player_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_player_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_player_id()); } // int64 team_id = 3; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // .protobuf.ModuleType module_type = 1; @@ -3111,8 +2773,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.InstallMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_player_id() != 0) @@ -3161,6 +2823,7 @@ namespace protobuf 
&descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[8] ); } + // =================================================================== class BuildShipMsg::_Internal @@ -3168,39 +2831,33 @@ namespace protobuf public: }; - BuildShipMsg::BuildShipMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena) : - ::PROTOBUF_NAMESPACE_ID::Message(arena) + BuildShipMsg::BuildShipMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : + ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { - SharedCtor(arena); + SharedCtor(arena, is_message_owned); // @@protoc_insertion_point(arena_constructor:protobuf.BuildShipMsg) } BuildShipMsg::BuildShipMsg(const BuildShipMsg& from) : - ::PROTOBUF_NAMESPACE_ID::Message(), - _impl_(from._impl_) + ::PROTOBUF_NAMESPACE_ID::Message() { - _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( - from._internal_metadata_ - ); + BuildShipMsg* const _this = this; + (void)_this; + new (&_impl_) Impl_{ + decltype(_impl_.x_){}, decltype(_impl_.y_){}, decltype(_impl_.team_id_){}, decltype(_impl_.ship_type_){}, /*decltype(_impl_._cached_size_)*/ {}}; + + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + ::memcpy(&_impl_.x_, &from._impl_.x_, static_cast(reinterpret_cast(&_impl_.ship_type_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.ship_type_)); // @@protoc_insertion_point(copy_constructor:protobuf.BuildShipMsg) } - inline void BuildShipMsg::SharedCtor(::_pb::Arena* arena) + inline void BuildShipMsg::SharedCtor( + ::_pb::Arena* arena, bool is_message_owned + ) { (void)arena; + (void)is_message_owned; new (&_impl_) Impl_{ - decltype(_impl_.x_){0} - - , - decltype(_impl_.y_){0} - - , - decltype(_impl_.team_id_){::int64_t{0}} - - , - decltype(_impl_.ship_type_){0} - - , - /*decltype(_impl_._cached_size_)*/ {}}; + decltype(_impl_.x_){0}, decltype(_impl_.y_){0}, decltype(_impl_.team_id_){int64_t{0}}, 
decltype(_impl_.ship_type_){0}, /*decltype(_impl_._cached_size_)*/ {}}; } BuildShipMsg::~BuildShipMsg() @@ -3216,7 +2873,7 @@ namespace protobuf inline void BuildShipMsg::SharedDtor() { - ABSL_DCHECK(GetArenaForAllocation() == nullptr); + GOOGLE_DCHECK(GetArenaForAllocation() == nullptr); } void BuildShipMsg::SetCachedSize(int size) const @@ -3227,11 +2884,11 @@ namespace protobuf void BuildShipMsg::Clear() { // @@protoc_insertion_point(message_clear_start:protobuf.BuildShipMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; - ::memset(&_impl_.x_, 0, static_cast<::size_t>(reinterpret_cast(&_impl_.ship_type_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.ship_type_)); + ::memset(&_impl_.x_, 0, static_cast(reinterpret_cast(&_impl_.ship_type_) - reinterpret_cast(&_impl_.x_)) + sizeof(_impl_.ship_type_)); _internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); } @@ -3242,58 +2899,50 @@ namespace protobuf goto failure while (!ctx->Done(&ptr)) { - ::uint32_t tag; + uint32_t tag; ptr = ::_pbi::ReadTag(ptr, &tag); switch (tag >> 3) { // int32 x = 1; case 1: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 8)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 8)) { _impl_.x_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // int32 y = 2; case 2: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 16)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 16)) { _impl_.y_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; // .protobuf.ShipType ship_type = 3; case 3: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 24)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 24)) { - ::int32_t val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); + uint64_t val = 
::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); _internal_set_ship_type(static_cast<::protobuf::ShipType>(val)); } else - { goto handle_unusual; - } continue; // int64 team_id = 4; case 4: - if (PROTOBUF_PREDICT_TRUE(static_cast<::uint8_t>(tag) == 32)) + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 32)) { _impl_.team_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr); CHK_(ptr); } else - { goto handle_unusual; - } continue; default: goto handle_unusual; @@ -3321,30 +2970,26 @@ namespace protobuf #undef CHK_ } - ::uint8_t* BuildShipMsg::_InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* BuildShipMsg::_InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const { // @@protoc_insertion_point(serialize_to_array_start:protobuf.BuildShipMsg) - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 1, this->_internal_x(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(1, this->_internal_x(), target); } // int32 y = 2; if (this->_internal_y() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt32ToArray( - 2, this->_internal_y(), target - ); + target = ::_pbi::WireFormatLite::WriteInt32ToArray(2, this->_internal_y(), target); } // .protobuf.ShipType ship_type = 3; @@ -3360,9 +3005,7 @@ namespace protobuf if (this->_internal_team_id() != 0) { target = stream->EnsureSpace(target); - target = ::_pbi::WireFormatLite::WriteInt64ToArray( - 4, this->_internal_team_id(), target - ); + target = ::_pbi::WireFormatLite::WriteInt64ToArray(4, this->_internal_team_id(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) @@ -3375,37 +3018,31 @@ namespace protobuf return target; } - ::size_t 
BuildShipMsg::ByteSizeLong() const + size_t BuildShipMsg::ByteSizeLong() const { // @@protoc_insertion_point(message_byte_size_start:protobuf.BuildShipMsg) - ::size_t total_size = 0; + size_t total_size = 0; - ::uint32_t cached_has_bits = 0; + uint32_t cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void)cached_has_bits; // int32 x = 1; if (this->_internal_x() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_x() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_x()); } // int32 y = 2; if (this->_internal_y() != 0) { - total_size += ::_pbi::WireFormatLite::Int32SizePlusOne( - this->_internal_y() - ); + total_size += ::_pbi::WireFormatLite::Int32SizePlusOne(this->_internal_y()); } // int64 team_id = 4; if (this->_internal_team_id() != 0) { - total_size += ::_pbi::WireFormatLite::Int64SizePlusOne( - this->_internal_team_id() - ); + total_size += ::_pbi::WireFormatLite::Int64SizePlusOne(this->_internal_team_id()); } // .protobuf.ShipType ship_type = 3; @@ -3431,8 +3068,8 @@ namespace protobuf auto* const _this = static_cast(&to_msg); auto& from = static_cast(from_msg); // @@protoc_insertion_point(class_specific_merge_from_start:protobuf.BuildShipMsg) - ABSL_DCHECK_NE(&from, _this); - ::uint32_t cached_has_bits = 0; + GOOGLE_DCHECK_NE(&from, _this); + uint32_t cached_has_bits = 0; (void)cached_has_bits; if (from._internal_x() != 0) @@ -3485,6 +3122,7 @@ namespace protobuf &descriptor_table_Message2Server_2eproto_getter, &descriptor_table_Message2Server_2eproto_once, file_level_metadata_Message2Server_2eproto[9] ); } + // @@protoc_insertion_point(namespace_scope) } // namespace protobuf PROTOBUF_NAMESPACE_OPEN @@ -3549,5 +3187,6 @@ PROTOBUF_NOINLINE ::protobuf::BuildShipMsg* return Arena::CreateMessageInternal<::protobuf::BuildShipMsg>(arena); } PROTOBUF_NAMESPACE_CLOSE + // @@protoc_insertion_point(global_scope) -#include "google/protobuf/port_undef.inc" +#include diff 
--git a/CAPI/cpp/proto/Message2Server.pb.h b/CAPI/cpp/proto/Message2Server.pb.h index 50af2525..69bc9138 100644 --- a/CAPI/cpp/proto/Message2Server.pb.h +++ b/CAPI/cpp/proto/Message2Server.pb.h @@ -1,45 +1,40 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: Message2Server.proto -#ifndef GOOGLE_PROTOBUF_INCLUDED_Message2Server_2eproto_2epb_2eh -#define GOOGLE_PROTOBUF_INCLUDED_Message2Server_2eproto_2epb_2eh +#ifndef GOOGLE_PROTOBUF_INCLUDED_Message2Server_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_Message2Server_2eproto #include #include -#include - -#include "google/protobuf/port_def.inc" -#if PROTOBUF_VERSION < 4023000 -#error "This file was generated by a newer version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please update" -#error "your headers." -#endif // PROTOBUF_VERSION - -#if 4023004 < PROTOBUF_MIN_PROTOC_VERSION -#error "This file was generated by an older version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please" -#error "regenerate this file with a newer version of protoc." -#endif // PROTOBUF_MIN_PROTOC_VERSION -#include "google/protobuf/port_undef.inc" -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/arena.h" -#include "google/protobuf/arenastring.h" -#include "google/protobuf/generated_message_bases.h" -#include "google/protobuf/generated_message_util.h" -#include "google/protobuf/metadata_lite.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/message.h" -#include "google/protobuf/repeated_field.h" // IWYU pragma: export -#include "google/protobuf/extension_set.h" // IWYU pragma: export -#include "google/protobuf/unknown_field_set.h" + +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. 
+#endif +#if 3021006 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include #include "MessageType.pb.h" // @@protoc_insertion_point(includes) - -// Must be included last. -#include "google/protobuf/port_def.inc" - +#include #define PROTOBUF_INTERNAL_EXPORT_Message2Server_2eproto - PROTOBUF_NAMESPACE_OPEN namespace internal { @@ -50,10 +45,9 @@ PROTOBUF_NAMESPACE_CLOSE // Internal implementation detail -- do not use these members. struct TableStruct_Message2Server_2eproto { - static const ::uint32_t offsets[]; + static const uint32_t offsets[]; }; -extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable - descriptor_table_Message2Server_2eproto; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_Message2Server_2eproto; namespace protobuf { class AttackMsg; @@ -109,14 +103,11 @@ ::protobuf::RecoverMsg* Arena::CreateMaybeMessage<::protobuf::RecoverMsg>(Arena* template<> ::protobuf::SendMsg* Arena::CreateMaybeMessage<::protobuf::SendMsg>(Arena*); PROTOBUF_NAMESPACE_CLOSE - namespace protobuf { // =================================================================== - // ------------------------------------------------------------------- - class NullRequest final : public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:protobuf.NullRequest) */ { @@ -125,7 +116,6 @@ namespace protobuf NullRequest(nullptr) { } - template explicit PROTOBUF_CONSTEXPR NullRequest(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); NullRequest(const NullRequest& from); @@ -160,15 +150,6 @@ namespace protobuf return *this; } - inline const 
::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -221,7 +202,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -246,13 +227,13 @@ namespace protobuf private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.NullRequest"; } protected: - explicit NullRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit NullRequest(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -277,7 +258,8 @@ namespace protobuf { }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class IDMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.IDMsg) */ @@ -288,7 +270,6 @@ namespace protobuf { } ~IDMsg() override; - template explicit PROTOBUF_CONSTEXPR IDMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); IDMsg(const IDMsg& from); @@ -323,15 +304,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return 
_internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -384,7 +356,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -409,10 +381,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -420,20 +392,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(IDMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.IDMsg"; } protected: - explicit IDMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit IDMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -452,22 +424,22 @@ namespace protobuf }; // int64 player_id = 1; void clear_player_id(); - 
::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void _internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int64 team_id = 2; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // @@protoc_insertion_point(class_scope:protobuf.IDMsg) @@ -481,8 +453,8 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; - ::int64_t team_id_; + int64_t player_id_; + int64_t team_id_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -490,7 +462,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class PlayerMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.PlayerMsg) */ @@ -501,7 +474,6 @@ namespace protobuf { } ~PlayerMsg() override; - template explicit PROTOBUF_CONSTEXPR PlayerMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); PlayerMsg(const PlayerMsg& from); @@ -536,15 +508,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return 
_internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -597,7 +560,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -622,10 +585,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -633,20 +596,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(PlayerMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.PlayerMsg"; } protected: - explicit PlayerMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit PlayerMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -668,22 +631,22 @@ namespace protobuf }; // int64 player_id = 1; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void 
_internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int64 team_id = 2; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // .protobuf.ShipType ship_type = 3; @@ -698,22 +661,22 @@ namespace protobuf public: // int32 x = 4; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 5; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // @@protoc_insertion_point(class_scope:protobuf.PlayerMsg) @@ -727,11 +690,11 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; - ::int64_t team_id_; + int64_t player_id_; + int64_t team_id_; int ship_type_; - ::int32_t x_; - ::int32_t y_; + int32_t x_; + int32_t y_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -739,7 +702,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class MoveMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.MoveMsg) */ @@ -750,7 +714,6 @@ namespace protobuf { } 
~MoveMsg() override; - template explicit PROTOBUF_CONSTEXPR MoveMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); MoveMsg(const MoveMsg& from); @@ -785,15 +748,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -846,7 +800,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -871,10 +825,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -882,20 +836,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(MoveMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return 
"protobuf.MoveMsg"; } protected: - explicit MoveMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit MoveMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -916,12 +870,12 @@ namespace protobuf }; // int64 player_id = 1; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void _internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // double angle = 2; @@ -936,22 +890,22 @@ namespace protobuf public: // int64 time_in_milliseconds = 3; void clear_time_in_milliseconds(); - ::int64_t time_in_milliseconds() const; - void set_time_in_milliseconds(::int64_t value); + int64_t time_in_milliseconds() const; + void set_time_in_milliseconds(int64_t value); private: - ::int64_t _internal_time_in_milliseconds() const; - void _internal_set_time_in_milliseconds(::int64_t value); + int64_t _internal_time_in_milliseconds() const; + void _internal_set_time_in_milliseconds(int64_t value); public: // int64 team_id = 4; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // @@protoc_insertion_point(class_scope:protobuf.MoveMsg) @@ -965,10 +919,10 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; + int64_t player_id_; double angle_; - ::int64_t time_in_milliseconds_; - ::int64_t team_id_; + int64_t time_in_milliseconds_; + int64_t team_id_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -976,7 +930,8 @@ namespace protobuf 
Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class SendMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.SendMsg) */ @@ -987,7 +942,6 @@ namespace protobuf { } ~SendMsg() override; - template explicit PROTOBUF_CONSTEXPR SendMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); SendMsg(const SendMsg& from); @@ -1022,15 +976,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -1090,7 +1035,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1115,10 +1060,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1126,20 +1071,20 @@ namespace protobuf } private: - void 
SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(SendMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.SendMsg"; } protected: - explicit SendMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit SendMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1161,69 +1106,73 @@ namespace protobuf }; // int64 player_id = 1; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void _internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int64 to_player_id = 2; void clear_to_player_id(); - ::int64_t to_player_id() const; - void set_to_player_id(::int64_t value); + int64_t to_player_id() const; + void set_to_player_id(int64_t value); private: - ::int64_t _internal_to_player_id() const; - void _internal_set_to_player_id(::int64_t value); + int64_t _internal_to_player_id() const; + void _internal_set_to_player_id(int64_t value); public: // int64 team_id = 5; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // string text_message = 3; bool has_text_message() const; + + private: + bool _internal_has_text_message() const; + + public: void clear_text_message(); const 
std::string& text_message() const; - - template - void set_text_message(Arg_&& arg, Args_... args); + template + void set_text_message(ArgT0&& arg0, ArgT... args); std::string* mutable_text_message(); PROTOBUF_NODISCARD std::string* release_text_message(); - void set_allocated_text_message(std::string* ptr); + void set_allocated_text_message(std::string* text_message); private: const std::string& _internal_text_message() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_text_message( - const std::string& value - ); + inline PROTOBUF_ALWAYS_INLINE void _internal_set_text_message(const std::string& value); std::string* _internal_mutable_text_message(); public: // bytes binary_message = 4; bool has_binary_message() const; + + private: + bool _internal_has_binary_message() const; + + public: void clear_binary_message(); const std::string& binary_message() const; - - template - void set_binary_message(Arg_&& arg, Args_... args); + template + void set_binary_message(ArgT0&& arg0, ArgT... 
args); std::string* mutable_binary_message(); PROTOBUF_NODISCARD std::string* release_binary_message(); - void set_allocated_binary_message(std::string* ptr); + void set_allocated_binary_message(std::string* binary_message); private: const std::string& _internal_binary_message() const; - inline PROTOBUF_ALWAYS_INLINE void _internal_set_binary_message( - const std::string& value - ); + inline PROTOBUF_ALWAYS_INLINE void _internal_set_binary_message(const std::string& value); std::string* _internal_mutable_binary_message(); public: @@ -1245,9 +1194,9 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; - ::int64_t to_player_id_; - ::int64_t team_id_; + int64_t player_id_; + int64_t to_player_id_; + int64_t team_id_; union MessageUnion { constexpr MessageUnion() : @@ -1259,14 +1208,15 @@ namespace protobuf ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr binary_message_; } message_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; - ::uint32_t _oneof_case_[1]; + uint32_t _oneof_case_[1]; }; union { Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class AttackMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.AttackMsg) */ @@ -1277,7 +1227,6 @@ namespace protobuf { } ~AttackMsg() override; - template explicit PROTOBUF_CONSTEXPR AttackMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); AttackMsg(const AttackMsg& from); @@ -1312,15 +1261,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* 
mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -1373,7 +1313,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1398,10 +1338,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1409,20 +1349,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(AttackMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.AttackMsg"; } protected: - explicit AttackMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit AttackMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1442,12 +1382,12 @@ namespace protobuf }; // int64 player_id = 1; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t 
_internal_player_id() const; - void _internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // double angle = 2; @@ -1462,12 +1402,12 @@ namespace protobuf public: // int64 team_id = 3; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // @@protoc_insertion_point(class_scope:protobuf.AttackMsg) @@ -1481,9 +1421,9 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; + int64_t player_id_; double angle_; - ::int64_t team_id_; + int64_t team_id_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -1491,7 +1431,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class ConstructMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.ConstructMsg) */ @@ -1502,7 +1443,6 @@ namespace protobuf { } ~ConstructMsg() override; - template explicit PROTOBUF_CONSTEXPR ConstructMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); ConstructMsg(const ConstructMsg& from); @@ -1537,15 +1477,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return 
_internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -1598,7 +1529,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1623,10 +1554,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1634,20 +1565,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(ConstructMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.ConstructMsg"; } protected: - explicit ConstructMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit ConstructMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1667,22 +1598,22 @@ namespace protobuf }; // int64 player_id = 1; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void 
_internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int64 team_id = 3; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // .protobuf.ConstructionType construction_type = 2; @@ -1706,8 +1637,8 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; - ::int64_t team_id_; + int64_t player_id_; + int64_t team_id_; int construction_type_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; @@ -1716,7 +1647,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class RecoverMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.RecoverMsg) */ @@ -1727,7 +1659,6 @@ namespace protobuf { } ~RecoverMsg() override; - template explicit PROTOBUF_CONSTEXPR RecoverMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); RecoverMsg(const RecoverMsg& from); @@ -1762,15 +1693,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ 
-1823,7 +1745,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -1848,10 +1770,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -1859,20 +1781,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(RecoverMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.RecoverMsg"; } protected: - explicit RecoverMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit RecoverMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -1892,32 +1814,32 @@ namespace protobuf }; // int64 player_id = 1; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void _internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int64 recover = 2; void clear_recover(); - ::int64_t recover() 
const; - void set_recover(::int64_t value); + int64_t recover() const; + void set_recover(int64_t value); private: - ::int64_t _internal_recover() const; - void _internal_set_recover(::int64_t value); + int64_t _internal_recover() const; + void _internal_set_recover(int64_t value); public: // int64 team_id = 3; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // @@protoc_insertion_point(class_scope:protobuf.RecoverMsg) @@ -1931,9 +1853,9 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; - ::int64_t recover_; - ::int64_t team_id_; + int64_t player_id_; + int64_t recover_; + int64_t team_id_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; union @@ -1941,7 +1863,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class InstallMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.InstallMsg) */ @@ -1952,7 +1875,6 @@ namespace protobuf { } ~InstallMsg() override; - template explicit PROTOBUF_CONSTEXPR InstallMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); InstallMsg(const InstallMsg& from); @@ -1987,15 +1909,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return 
_internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ -2048,7 +1961,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -2073,10 +1986,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -2084,20 +1997,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(InstallMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.InstallMsg"; } protected: - explicit InstallMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit InstallMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -2117,22 +2030,22 @@ namespace protobuf }; // int64 player_id = 2; void clear_player_id(); - ::int64_t player_id() const; - void set_player_id(::int64_t value); + int64_t player_id() const; + void set_player_id(int64_t value); private: - ::int64_t _internal_player_id() const; - void 
_internal_set_player_id(::int64_t value); + int64_t _internal_player_id() const; + void _internal_set_player_id(int64_t value); public: // int64 team_id = 3; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // .protobuf.ModuleType module_type = 1; @@ -2156,8 +2069,8 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int64_t player_id_; - ::int64_t team_id_; + int64_t player_id_; + int64_t team_id_; int module_type_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; @@ -2166,7 +2079,8 @@ namespace protobuf Impl_ _impl_; }; friend struct ::TableStruct_Message2Server_2eproto; - }; // ------------------------------------------------------------------- + }; + // ------------------------------------------------------------------- class BuildShipMsg final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:protobuf.BuildShipMsg) */ @@ -2177,7 +2091,6 @@ namespace protobuf { } ~BuildShipMsg() override; - template explicit PROTOBUF_CONSTEXPR BuildShipMsg(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); BuildShipMsg(const BuildShipMsg& from); @@ -2212,15 +2125,6 @@ namespace protobuf return *this; } - inline const ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet& unknown_fields() const - { - return _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance); - } - inline ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet* mutable_unknown_fields() - { - return _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(); - } - static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { return GetDescriptor(); @@ 
-2273,7 +2177,7 @@ namespace protobuf { if (other == this) return; - ABSL_DCHECK(GetOwningArena() == other->GetOwningArena()); + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); InternalSwap(other); } @@ -2298,10 +2202,10 @@ namespace protobuf PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; - ::size_t ByteSizeLong() const final; + size_t ByteSizeLong() const final; const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; - ::uint8_t* _InternalSerialize( - ::uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream + uint8_t* _InternalSerialize( + uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream ) const final; int GetCachedSize() const final { @@ -2309,20 +2213,20 @@ namespace protobuf } private: - void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena); + void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned); void SharedDtor(); void SetCachedSize(int size) const final; void InternalSwap(BuildShipMsg* other); private: friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; - static ::absl::string_view FullMessageName() + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { return "protobuf.BuildShipMsg"; } protected: - explicit BuildShipMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena); + explicit BuildShipMsg(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned = false); public: static const ClassData _class_data_; @@ -2343,32 +2247,32 @@ namespace protobuf }; // int32 x = 1; void clear_x(); - ::int32_t x() const; - void set_x(::int32_t value); + int32_t x() const; + void set_x(int32_t value); private: - ::int32_t _internal_x() const; - void _internal_set_x(::int32_t value); + int32_t _internal_x() const; + void _internal_set_x(int32_t value); public: // int32 y = 2; void clear_y(); - ::int32_t y() const; - void set_y(::int32_t value); + int32_t y() const; + void set_y(int32_t value); 
private: - ::int32_t _internal_y() const; - void _internal_set_y(::int32_t value); + int32_t _internal_y() const; + void _internal_set_y(int32_t value); public: // int64 team_id = 4; void clear_team_id(); - ::int64_t team_id() const; - void set_team_id(::int64_t value); + int64_t team_id() const; + void set_team_id(int64_t value); private: - ::int64_t _internal_team_id() const; - void _internal_set_team_id(::int64_t value); + int64_t _internal_team_id() const; + void _internal_set_team_id(int64_t value); public: // .protobuf.ShipType ship_type = 3; @@ -2392,9 +2296,9 @@ namespace protobuf typedef void DestructorSkippable_; struct Impl_ { - ::int32_t x_; - ::int32_t y_; - ::int64_t team_id_; + int32_t x_; + int32_t y_; + int64_t team_id_; int ship_type_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; }; @@ -2404,7 +2308,6 @@ namespace protobuf }; friend struct ::TableStruct_Message2Server_2eproto; }; - // =================================================================== // =================================================================== @@ -2413,8 +2316,6 @@ namespace protobuf #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" #endif // __GNUC__ - // ------------------------------------------------------------------- - // NullRequest // ------------------------------------------------------------------- @@ -2424,51 +2325,49 @@ namespace protobuf // int64 player_id = 1; inline void IDMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t IDMsg::player_id() const + inline int64_t IDMsg::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.IDMsg.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void IDMsg::set_player_id(::int64_t value) + inline int64_t IDMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.IDMsg.player_id) + // 
@@protoc_insertion_point(field_get:protobuf.IDMsg.player_id) + return _internal_player_id(); } - inline ::int64_t IDMsg::_internal_player_id() const + inline void IDMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void IDMsg::_internal_set_player_id(::int64_t value) + inline void IDMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.IDMsg.player_id) } // int64 team_id = 2; inline void IDMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t IDMsg::team_id() const + inline int64_t IDMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.IDMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void IDMsg::set_team_id(::int64_t value) + inline int64_t IDMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.IDMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.IDMsg.team_id) + return _internal_team_id(); } - inline ::int64_t IDMsg::_internal_team_id() const + inline void IDMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void IDMsg::_internal_set_team_id(::int64_t value) + inline void IDMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.IDMsg.team_id) } // ------------------------------------------------------------------- @@ -2478,51 +2377,49 @@ namespace protobuf // int64 player_id = 1; inline void PlayerMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t PlayerMsg::player_id() const + inline int64_t PlayerMsg::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.player_id) - return 
_internal_player_id(); + return _impl_.player_id_; } - inline void PlayerMsg::set_player_id(::int64_t value) + inline int64_t PlayerMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.player_id) + // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.player_id) + return _internal_player_id(); } - inline ::int64_t PlayerMsg::_internal_player_id() const + inline void PlayerMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void PlayerMsg::_internal_set_player_id(::int64_t value) + inline void PlayerMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.player_id) } // int64 team_id = 2; inline void PlayerMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t PlayerMsg::team_id() const + inline int64_t PlayerMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void PlayerMsg::set_team_id(::int64_t value) + inline int64_t PlayerMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.team_id) + return _internal_team_id(); } - inline ::int64_t PlayerMsg::_internal_team_id() const + inline void PlayerMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void PlayerMsg::_internal_set_team_id(::int64_t value) + inline void PlayerMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.team_id) } // .protobuf.ShipType ship_type = 3; @@ -2530,49 +2427,47 @@ namespace protobuf { _impl_.ship_type_ = 0; 
} + inline ::protobuf::ShipType PlayerMsg::_internal_ship_type() const + { + return static_cast<::protobuf::ShipType>(_impl_.ship_type_); + } inline ::protobuf::ShipType PlayerMsg::ship_type() const { // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.ship_type) return _internal_ship_type(); } + inline void PlayerMsg::_internal_set_ship_type(::protobuf::ShipType value) + { + _impl_.ship_type_ = value; + } inline void PlayerMsg::set_ship_type(::protobuf::ShipType value) { _internal_set_ship_type(value); // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.ship_type) } - inline ::protobuf::ShipType PlayerMsg::_internal_ship_type() const - { - return static_cast<::protobuf::ShipType>(_impl_.ship_type_); - } - inline void PlayerMsg::_internal_set_ship_type(::protobuf::ShipType value) - { - ; - _impl_.ship_type_ = value; - } // int32 x = 4; inline void PlayerMsg::clear_x() { _impl_.x_ = 0; } - inline ::int32_t PlayerMsg::x() const + inline int32_t PlayerMsg::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.x) - return _internal_x(); + return _impl_.x_; } - inline void PlayerMsg::set_x(::int32_t value) + inline int32_t PlayerMsg::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.x) + // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.x) + return _internal_x(); } - inline ::int32_t PlayerMsg::_internal_x() const + inline void PlayerMsg::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void PlayerMsg::_internal_set_x(::int32_t value) + inline void PlayerMsg::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); + // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.x) } // int32 y = 5; @@ -2580,24 +2475,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t PlayerMsg::y() const + inline int32_t PlayerMsg::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.y) - return _internal_y(); + 
return _impl_.y_; } - inline void PlayerMsg::set_y(::int32_t value) + inline int32_t PlayerMsg::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.y) + // @@protoc_insertion_point(field_get:protobuf.PlayerMsg.y) + return _internal_y(); } - inline ::int32_t PlayerMsg::_internal_y() const + inline void PlayerMsg::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void PlayerMsg::_internal_set_y(::int32_t value) + inline void PlayerMsg::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.PlayerMsg.y) } // ------------------------------------------------------------------- @@ -2607,26 +2501,25 @@ namespace protobuf // int64 player_id = 1; inline void MoveMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t MoveMsg::player_id() const + inline int64_t MoveMsg::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.MoveMsg.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void MoveMsg::set_player_id(::int64_t value) + inline int64_t MoveMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.MoveMsg.player_id) + // @@protoc_insertion_point(field_get:protobuf.MoveMsg.player_id) + return _internal_player_id(); } - inline ::int64_t MoveMsg::_internal_player_id() const + inline void MoveMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void MoveMsg::_internal_set_player_id(::int64_t value) + inline void MoveMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.MoveMsg.player_id) } // double angle = 2; @@ -2634,74 +2527,71 @@ namespace protobuf { _impl_.angle_ = 0; } + inline double 
MoveMsg::_internal_angle() const + { + return _impl_.angle_; + } inline double MoveMsg::angle() const { // @@protoc_insertion_point(field_get:protobuf.MoveMsg.angle) return _internal_angle(); } + inline void MoveMsg::_internal_set_angle(double value) + { + _impl_.angle_ = value; + } inline void MoveMsg::set_angle(double value) { _internal_set_angle(value); // @@protoc_insertion_point(field_set:protobuf.MoveMsg.angle) } - inline double MoveMsg::_internal_angle() const - { - return _impl_.angle_; - } - inline void MoveMsg::_internal_set_angle(double value) - { - ; - _impl_.angle_ = value; - } // int64 time_in_milliseconds = 3; inline void MoveMsg::clear_time_in_milliseconds() { - _impl_.time_in_milliseconds_ = ::int64_t{0}; + _impl_.time_in_milliseconds_ = int64_t{0}; } - inline ::int64_t MoveMsg::time_in_milliseconds() const + inline int64_t MoveMsg::_internal_time_in_milliseconds() const { - // @@protoc_insertion_point(field_get:protobuf.MoveMsg.time_in_milliseconds) - return _internal_time_in_milliseconds(); + return _impl_.time_in_milliseconds_; } - inline void MoveMsg::set_time_in_milliseconds(::int64_t value) + inline int64_t MoveMsg::time_in_milliseconds() const { - _internal_set_time_in_milliseconds(value); - // @@protoc_insertion_point(field_set:protobuf.MoveMsg.time_in_milliseconds) + // @@protoc_insertion_point(field_get:protobuf.MoveMsg.time_in_milliseconds) + return _internal_time_in_milliseconds(); } - inline ::int64_t MoveMsg::_internal_time_in_milliseconds() const + inline void MoveMsg::_internal_set_time_in_milliseconds(int64_t value) { - return _impl_.time_in_milliseconds_; + _impl_.time_in_milliseconds_ = value; } - inline void MoveMsg::_internal_set_time_in_milliseconds(::int64_t value) + inline void MoveMsg::set_time_in_milliseconds(int64_t value) { - ; - _impl_.time_in_milliseconds_ = value; + _internal_set_time_in_milliseconds(value); + // @@protoc_insertion_point(field_set:protobuf.MoveMsg.time_in_milliseconds) } // int64 team_id = 4; inline 
void MoveMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t MoveMsg::team_id() const + inline int64_t MoveMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.MoveMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void MoveMsg::set_team_id(::int64_t value) + inline int64_t MoveMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.MoveMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.MoveMsg.team_id) + return _internal_team_id(); } - inline ::int64_t MoveMsg::_internal_team_id() const + inline void MoveMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void MoveMsg::_internal_set_team_id(::int64_t value) + inline void MoveMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.MoveMsg.team_id) } // ------------------------------------------------------------------- @@ -2711,65 +2601,67 @@ namespace protobuf // int64 player_id = 1; inline void SendMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t SendMsg::player_id() const + inline int64_t SendMsg::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.SendMsg.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void SendMsg::set_player_id(::int64_t value) + inline int64_t SendMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.SendMsg.player_id) + // @@protoc_insertion_point(field_get:protobuf.SendMsg.player_id) + return _internal_player_id(); } - inline ::int64_t SendMsg::_internal_player_id() const + inline void SendMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - 
inline void SendMsg::_internal_set_player_id(::int64_t value) + inline void SendMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.SendMsg.player_id) } // int64 to_player_id = 2; inline void SendMsg::clear_to_player_id() { - _impl_.to_player_id_ = ::int64_t{0}; + _impl_.to_player_id_ = int64_t{0}; } - inline ::int64_t SendMsg::to_player_id() const + inline int64_t SendMsg::_internal_to_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.SendMsg.to_player_id) - return _internal_to_player_id(); + return _impl_.to_player_id_; } - inline void SendMsg::set_to_player_id(::int64_t value) + inline int64_t SendMsg::to_player_id() const { - _internal_set_to_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.SendMsg.to_player_id) + // @@protoc_insertion_point(field_get:protobuf.SendMsg.to_player_id) + return _internal_to_player_id(); } - inline ::int64_t SendMsg::_internal_to_player_id() const + inline void SendMsg::_internal_set_to_player_id(int64_t value) { - return _impl_.to_player_id_; + _impl_.to_player_id_ = value; } - inline void SendMsg::_internal_set_to_player_id(::int64_t value) + inline void SendMsg::set_to_player_id(int64_t value) { - ; - _impl_.to_player_id_ = value; + _internal_set_to_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.SendMsg.to_player_id) } // string text_message = 3; - inline bool SendMsg::has_text_message() const + inline bool SendMsg::_internal_has_text_message() const { return message_case() == kTextMessage; } + inline bool SendMsg::has_text_message() const + { + return _internal_has_text_message(); + } inline void SendMsg::set_has_text_message() { _impl_._oneof_case_[0] = kTextMessage; } inline void SendMsg::clear_text_message() { - if (message_case() == kTextMessage) + if (_internal_has_text_message()) { _impl_.message_.text_message_.Destroy(); clear_has_message(); @@ -2780,17 +2672,16 @@ 
namespace protobuf // @@protoc_insertion_point(field_get:protobuf.SendMsg.text_message) return _internal_text_message(); } - template - inline PROTOBUF_ALWAYS_INLINE void SendMsg::set_text_message(Arg_&& arg, Args_... args) + template + inline void SendMsg::set_text_message(ArgT0&& arg0, ArgT... args) { - if (message_case() != kTextMessage) + if (!_internal_has_text_message()) { clear_message(); - set_has_text_message(); _impl_.message_.text_message_.InitDefault(); } - _impl_.message_.text_message_.Set(static_cast(arg), args..., GetArenaForAllocation()); + _impl_.message_.text_message_.Set(static_cast(arg0), args..., GetArenaForAllocation()); // @@protoc_insertion_point(field_set:protobuf.SendMsg.text_message) } inline std::string* SendMsg::mutable_text_message() @@ -2801,30 +2692,27 @@ namespace protobuf } inline const std::string& SendMsg::_internal_text_message() const { - if (message_case() != kTextMessage) + if (_internal_has_text_message()) { - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); + return _impl_.message_.text_message_.Get(); } - return _impl_.message_.text_message_.Get(); + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); } inline void SendMsg::_internal_set_text_message(const std::string& value) { - if (message_case() != kTextMessage) + if (!_internal_has_text_message()) { clear_message(); - set_has_text_message(); _impl_.message_.text_message_.InitDefault(); } - _impl_.message_.text_message_.Set(value, GetArenaForAllocation()); } inline std::string* SendMsg::_internal_mutable_text_message() { - if (message_case() != kTextMessage) + if (!_internal_has_text_message()) { clear_message(); - set_has_text_message(); _impl_.message_.text_message_.InitDefault(); } @@ -2833,39 +2721,46 @@ namespace protobuf inline std::string* SendMsg::release_text_message() { // @@protoc_insertion_point(field_release:protobuf.SendMsg.text_message) - if (message_case() != kTextMessage) + if (_internal_has_text_message()) + 
{ + clear_has_message(); + return _impl_.message_.text_message_.Release(); + } + else { return nullptr; } - clear_has_message(); - return _impl_.message_.text_message_.Release(); } - inline void SendMsg::set_allocated_text_message(std::string* value) + inline void SendMsg::set_allocated_text_message(std::string* text_message) { if (has_message()) { clear_message(); } - if (value != nullptr) + if (text_message != nullptr) { set_has_text_message(); - _impl_.message_.text_message_.InitAllocated(value, GetArenaForAllocation()); + _impl_.message_.text_message_.InitAllocated(text_message, GetArenaForAllocation()); } // @@protoc_insertion_point(field_set_allocated:protobuf.SendMsg.text_message) } // bytes binary_message = 4; - inline bool SendMsg::has_binary_message() const + inline bool SendMsg::_internal_has_binary_message() const { return message_case() == kBinaryMessage; } + inline bool SendMsg::has_binary_message() const + { + return _internal_has_binary_message(); + } inline void SendMsg::set_has_binary_message() { _impl_._oneof_case_[0] = kBinaryMessage; } inline void SendMsg::clear_binary_message() { - if (message_case() == kBinaryMessage) + if (_internal_has_binary_message()) { _impl_.message_.binary_message_.Destroy(); clear_has_message(); @@ -2876,17 +2771,16 @@ namespace protobuf // @@protoc_insertion_point(field_get:protobuf.SendMsg.binary_message) return _internal_binary_message(); } - template - inline PROTOBUF_ALWAYS_INLINE void SendMsg::set_binary_message(Arg_&& arg, Args_... args) + template + inline void SendMsg::set_binary_message(ArgT0&& arg0, ArgT... 
args) { - if (message_case() != kBinaryMessage) + if (!_internal_has_binary_message()) { clear_message(); - set_has_binary_message(); _impl_.message_.binary_message_.InitDefault(); } - _impl_.message_.binary_message_.SetBytes(static_cast(arg), args..., GetArenaForAllocation()); + _impl_.message_.binary_message_.SetBytes(static_cast(arg0), args..., GetArenaForAllocation()); // @@protoc_insertion_point(field_set:protobuf.SendMsg.binary_message) } inline std::string* SendMsg::mutable_binary_message() @@ -2897,30 +2791,27 @@ namespace protobuf } inline const std::string& SendMsg::_internal_binary_message() const { - if (message_case() != kBinaryMessage) + if (_internal_has_binary_message()) { - return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); + return _impl_.message_.binary_message_.Get(); } - return _impl_.message_.binary_message_.Get(); + return ::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(); } inline void SendMsg::_internal_set_binary_message(const std::string& value) { - if (message_case() != kBinaryMessage) + if (!_internal_has_binary_message()) { clear_message(); - set_has_binary_message(); _impl_.message_.binary_message_.InitDefault(); } - _impl_.message_.binary_message_.Set(value, GetArenaForAllocation()); } inline std::string* SendMsg::_internal_mutable_binary_message() { - if (message_case() != kBinaryMessage) + if (!_internal_has_binary_message()) { clear_message(); - set_has_binary_message(); _impl_.message_.binary_message_.InitDefault(); } @@ -2929,23 +2820,26 @@ namespace protobuf inline std::string* SendMsg::release_binary_message() { // @@protoc_insertion_point(field_release:protobuf.SendMsg.binary_message) - if (message_case() != kBinaryMessage) + if (_internal_has_binary_message()) + { + clear_has_message(); + return _impl_.message_.binary_message_.Release(); + } + else { return nullptr; } - clear_has_message(); - return _impl_.message_.binary_message_.Release(); } - inline void 
SendMsg::set_allocated_binary_message(std::string* value) + inline void SendMsg::set_allocated_binary_message(std::string* binary_message) { if (has_message()) { clear_message(); } - if (value != nullptr) + if (binary_message != nullptr) { set_has_binary_message(); - _impl_.message_.binary_message_.InitAllocated(value, GetArenaForAllocation()); + _impl_.message_.binary_message_.InitAllocated(binary_message, GetArenaForAllocation()); } // @@protoc_insertion_point(field_set_allocated:protobuf.SendMsg.binary_message) } @@ -2953,26 +2847,25 @@ namespace protobuf // int64 team_id = 5; inline void SendMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t SendMsg::team_id() const + inline int64_t SendMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.SendMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void SendMsg::set_team_id(::int64_t value) + inline int64_t SendMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.SendMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.SendMsg.team_id) + return _internal_team_id(); } - inline ::int64_t SendMsg::_internal_team_id() const + inline void SendMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void SendMsg::_internal_set_team_id(::int64_t value) + inline void SendMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.SendMsg.team_id) } inline bool SendMsg::has_message() const @@ -2994,26 +2887,25 @@ namespace protobuf // int64 player_id = 1; inline void AttackMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t AttackMsg::player_id() const + inline int64_t AttackMsg::_internal_player_id() const { - // 
@@protoc_insertion_point(field_get:protobuf.AttackMsg.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void AttackMsg::set_player_id(::int64_t value) + inline int64_t AttackMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.AttackMsg.player_id) + // @@protoc_insertion_point(field_get:protobuf.AttackMsg.player_id) + return _internal_player_id(); } - inline ::int64_t AttackMsg::_internal_player_id() const + inline void AttackMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void AttackMsg::_internal_set_player_id(::int64_t value) + inline void AttackMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.AttackMsg.player_id) } // double angle = 2; @@ -3021,49 +2913,47 @@ namespace protobuf { _impl_.angle_ = 0; } + inline double AttackMsg::_internal_angle() const + { + return _impl_.angle_; + } inline double AttackMsg::angle() const { // @@protoc_insertion_point(field_get:protobuf.AttackMsg.angle) return _internal_angle(); } + inline void AttackMsg::_internal_set_angle(double value) + { + _impl_.angle_ = value; + } inline void AttackMsg::set_angle(double value) { _internal_set_angle(value); // @@protoc_insertion_point(field_set:protobuf.AttackMsg.angle) } - inline double AttackMsg::_internal_angle() const - { - return _impl_.angle_; - } - inline void AttackMsg::_internal_set_angle(double value) - { - ; - _impl_.angle_ = value; - } // int64 team_id = 3; inline void AttackMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t AttackMsg::team_id() const + inline int64_t AttackMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.AttackMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void 
AttackMsg::set_team_id(::int64_t value) + inline int64_t AttackMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.AttackMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.AttackMsg.team_id) + return _internal_team_id(); } - inline ::int64_t AttackMsg::_internal_team_id() const + inline void AttackMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void AttackMsg::_internal_set_team_id(::int64_t value) + inline void AttackMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.AttackMsg.team_id) } // ------------------------------------------------------------------- @@ -3073,26 +2963,25 @@ namespace protobuf // int64 player_id = 1; inline void ConstructMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t ConstructMsg::player_id() const + inline int64_t ConstructMsg::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.ConstructMsg.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void ConstructMsg::set_player_id(::int64_t value) + inline int64_t ConstructMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.ConstructMsg.player_id) + // @@protoc_insertion_point(field_get:protobuf.ConstructMsg.player_id) + return _internal_player_id(); } - inline ::int64_t ConstructMsg::_internal_player_id() const + inline void ConstructMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void ConstructMsg::_internal_set_player_id(::int64_t value) + inline void ConstructMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.ConstructMsg.player_id) } 
// .protobuf.ConstructionType construction_type = 2; @@ -3100,49 +2989,47 @@ namespace protobuf { _impl_.construction_type_ = 0; } + inline ::protobuf::ConstructionType ConstructMsg::_internal_construction_type() const + { + return static_cast<::protobuf::ConstructionType>(_impl_.construction_type_); + } inline ::protobuf::ConstructionType ConstructMsg::construction_type() const { // @@protoc_insertion_point(field_get:protobuf.ConstructMsg.construction_type) return _internal_construction_type(); } + inline void ConstructMsg::_internal_set_construction_type(::protobuf::ConstructionType value) + { + _impl_.construction_type_ = value; + } inline void ConstructMsg::set_construction_type(::protobuf::ConstructionType value) { _internal_set_construction_type(value); // @@protoc_insertion_point(field_set:protobuf.ConstructMsg.construction_type) } - inline ::protobuf::ConstructionType ConstructMsg::_internal_construction_type() const - { - return static_cast<::protobuf::ConstructionType>(_impl_.construction_type_); - } - inline void ConstructMsg::_internal_set_construction_type(::protobuf::ConstructionType value) - { - ; - _impl_.construction_type_ = value; - } // int64 team_id = 3; inline void ConstructMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t ConstructMsg::team_id() const + inline int64_t ConstructMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.ConstructMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void ConstructMsg::set_team_id(::int64_t value) + inline int64_t ConstructMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.ConstructMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.ConstructMsg.team_id) + return _internal_team_id(); } - inline ::int64_t ConstructMsg::_internal_team_id() const + inline void ConstructMsg::_internal_set_team_id(int64_t value) { - return 
_impl_.team_id_; + _impl_.team_id_ = value; } - inline void ConstructMsg::_internal_set_team_id(::int64_t value) + inline void ConstructMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.ConstructMsg.team_id) } // ------------------------------------------------------------------- @@ -3152,76 +3039,73 @@ namespace protobuf // int64 player_id = 1; inline void RecoverMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t RecoverMsg::player_id() const + inline int64_t RecoverMsg::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.RecoverMsg.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void RecoverMsg::set_player_id(::int64_t value) + inline int64_t RecoverMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.RecoverMsg.player_id) + // @@protoc_insertion_point(field_get:protobuf.RecoverMsg.player_id) + return _internal_player_id(); } - inline ::int64_t RecoverMsg::_internal_player_id() const + inline void RecoverMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void RecoverMsg::_internal_set_player_id(::int64_t value) + inline void RecoverMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.RecoverMsg.player_id) } // int64 recover = 2; inline void RecoverMsg::clear_recover() { - _impl_.recover_ = ::int64_t{0}; + _impl_.recover_ = int64_t{0}; } - inline ::int64_t RecoverMsg::recover() const + inline int64_t RecoverMsg::_internal_recover() const { - // @@protoc_insertion_point(field_get:protobuf.RecoverMsg.recover) - return _internal_recover(); + return _impl_.recover_; } - inline void RecoverMsg::set_recover(::int64_t value) + inline 
int64_t RecoverMsg::recover() const { - _internal_set_recover(value); - // @@protoc_insertion_point(field_set:protobuf.RecoverMsg.recover) + // @@protoc_insertion_point(field_get:protobuf.RecoverMsg.recover) + return _internal_recover(); } - inline ::int64_t RecoverMsg::_internal_recover() const + inline void RecoverMsg::_internal_set_recover(int64_t value) { - return _impl_.recover_; + _impl_.recover_ = value; } - inline void RecoverMsg::_internal_set_recover(::int64_t value) + inline void RecoverMsg::set_recover(int64_t value) { - ; - _impl_.recover_ = value; + _internal_set_recover(value); + // @@protoc_insertion_point(field_set:protobuf.RecoverMsg.recover) } // int64 team_id = 3; inline void RecoverMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t RecoverMsg::team_id() const + inline int64_t RecoverMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.RecoverMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void RecoverMsg::set_team_id(::int64_t value) + inline int64_t RecoverMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.RecoverMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.RecoverMsg.team_id) + return _internal_team_id(); } - inline ::int64_t RecoverMsg::_internal_team_id() const + inline void RecoverMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void RecoverMsg::_internal_set_team_id(::int64_t value) + inline void RecoverMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.RecoverMsg.team_id) } // ------------------------------------------------------------------- @@ -3233,74 +3117,71 @@ namespace protobuf { _impl_.module_type_ = 0; } + inline ::protobuf::ModuleType InstallMsg::_internal_module_type() const + { + return 
static_cast<::protobuf::ModuleType>(_impl_.module_type_); + } inline ::protobuf::ModuleType InstallMsg::module_type() const { // @@protoc_insertion_point(field_get:protobuf.InstallMsg.module_type) return _internal_module_type(); } + inline void InstallMsg::_internal_set_module_type(::protobuf::ModuleType value) + { + _impl_.module_type_ = value; + } inline void InstallMsg::set_module_type(::protobuf::ModuleType value) { _internal_set_module_type(value); // @@protoc_insertion_point(field_set:protobuf.InstallMsg.module_type) } - inline ::protobuf::ModuleType InstallMsg::_internal_module_type() const - { - return static_cast<::protobuf::ModuleType>(_impl_.module_type_); - } - inline void InstallMsg::_internal_set_module_type(::protobuf::ModuleType value) - { - ; - _impl_.module_type_ = value; - } // int64 player_id = 2; inline void InstallMsg::clear_player_id() { - _impl_.player_id_ = ::int64_t{0}; + _impl_.player_id_ = int64_t{0}; } - inline ::int64_t InstallMsg::player_id() const + inline int64_t InstallMsg::_internal_player_id() const { - // @@protoc_insertion_point(field_get:protobuf.InstallMsg.player_id) - return _internal_player_id(); + return _impl_.player_id_; } - inline void InstallMsg::set_player_id(::int64_t value) + inline int64_t InstallMsg::player_id() const { - _internal_set_player_id(value); - // @@protoc_insertion_point(field_set:protobuf.InstallMsg.player_id) + // @@protoc_insertion_point(field_get:protobuf.InstallMsg.player_id) + return _internal_player_id(); } - inline ::int64_t InstallMsg::_internal_player_id() const + inline void InstallMsg::_internal_set_player_id(int64_t value) { - return _impl_.player_id_; + _impl_.player_id_ = value; } - inline void InstallMsg::_internal_set_player_id(::int64_t value) + inline void InstallMsg::set_player_id(int64_t value) { - ; - _impl_.player_id_ = value; + _internal_set_player_id(value); + // @@protoc_insertion_point(field_set:protobuf.InstallMsg.player_id) } // int64 team_id = 3; inline void 
InstallMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t InstallMsg::team_id() const + inline int64_t InstallMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.InstallMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void InstallMsg::set_team_id(::int64_t value) + inline int64_t InstallMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.InstallMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.InstallMsg.team_id) + return _internal_team_id(); } - inline ::int64_t InstallMsg::_internal_team_id() const + inline void InstallMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void InstallMsg::_internal_set_team_id(::int64_t value) + inline void InstallMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.InstallMsg.team_id) } // ------------------------------------------------------------------- @@ -3312,24 +3193,23 @@ namespace protobuf { _impl_.x_ = 0; } - inline ::int32_t BuildShipMsg::x() const + inline int32_t BuildShipMsg::_internal_x() const { - // @@protoc_insertion_point(field_get:protobuf.BuildShipMsg.x) - return _internal_x(); + return _impl_.x_; } - inline void BuildShipMsg::set_x(::int32_t value) + inline int32_t BuildShipMsg::x() const { - _internal_set_x(value); - // @@protoc_insertion_point(field_set:protobuf.BuildShipMsg.x) + // @@protoc_insertion_point(field_get:protobuf.BuildShipMsg.x) + return _internal_x(); } - inline ::int32_t BuildShipMsg::_internal_x() const + inline void BuildShipMsg::_internal_set_x(int32_t value) { - return _impl_.x_; + _impl_.x_ = value; } - inline void BuildShipMsg::_internal_set_x(::int32_t value) + inline void BuildShipMsg::set_x(int32_t value) { - ; - _impl_.x_ = value; + _internal_set_x(value); 
+ // @@protoc_insertion_point(field_set:protobuf.BuildShipMsg.x) } // int32 y = 2; @@ -3337,24 +3217,23 @@ namespace protobuf { _impl_.y_ = 0; } - inline ::int32_t BuildShipMsg::y() const + inline int32_t BuildShipMsg::_internal_y() const { - // @@protoc_insertion_point(field_get:protobuf.BuildShipMsg.y) - return _internal_y(); + return _impl_.y_; } - inline void BuildShipMsg::set_y(::int32_t value) + inline int32_t BuildShipMsg::y() const { - _internal_set_y(value); - // @@protoc_insertion_point(field_set:protobuf.BuildShipMsg.y) + // @@protoc_insertion_point(field_get:protobuf.BuildShipMsg.y) + return _internal_y(); } - inline ::int32_t BuildShipMsg::_internal_y() const + inline void BuildShipMsg::_internal_set_y(int32_t value) { - return _impl_.y_; + _impl_.y_ = value; } - inline void BuildShipMsg::_internal_set_y(::int32_t value) + inline void BuildShipMsg::set_y(int32_t value) { - ; - _impl_.y_ = value; + _internal_set_y(value); + // @@protoc_insertion_point(field_set:protobuf.BuildShipMsg.y) } // .protobuf.ShipType ship_type = 3; @@ -3362,60 +3241,75 @@ namespace protobuf { _impl_.ship_type_ = 0; } + inline ::protobuf::ShipType BuildShipMsg::_internal_ship_type() const + { + return static_cast<::protobuf::ShipType>(_impl_.ship_type_); + } inline ::protobuf::ShipType BuildShipMsg::ship_type() const { // @@protoc_insertion_point(field_get:protobuf.BuildShipMsg.ship_type) return _internal_ship_type(); } + inline void BuildShipMsg::_internal_set_ship_type(::protobuf::ShipType value) + { + _impl_.ship_type_ = value; + } inline void BuildShipMsg::set_ship_type(::protobuf::ShipType value) { _internal_set_ship_type(value); // @@protoc_insertion_point(field_set:protobuf.BuildShipMsg.ship_type) } - inline ::protobuf::ShipType BuildShipMsg::_internal_ship_type() const - { - return static_cast<::protobuf::ShipType>(_impl_.ship_type_); - } - inline void BuildShipMsg::_internal_set_ship_type(::protobuf::ShipType value) - { - ; - _impl_.ship_type_ = value; - } // int64 
team_id = 4; inline void BuildShipMsg::clear_team_id() { - _impl_.team_id_ = ::int64_t{0}; + _impl_.team_id_ = int64_t{0}; } - inline ::int64_t BuildShipMsg::team_id() const + inline int64_t BuildShipMsg::_internal_team_id() const { - // @@protoc_insertion_point(field_get:protobuf.BuildShipMsg.team_id) - return _internal_team_id(); + return _impl_.team_id_; } - inline void BuildShipMsg::set_team_id(::int64_t value) + inline int64_t BuildShipMsg::team_id() const { - _internal_set_team_id(value); - // @@protoc_insertion_point(field_set:protobuf.BuildShipMsg.team_id) + // @@protoc_insertion_point(field_get:protobuf.BuildShipMsg.team_id) + return _internal_team_id(); } - inline ::int64_t BuildShipMsg::_internal_team_id() const + inline void BuildShipMsg::_internal_set_team_id(int64_t value) { - return _impl_.team_id_; + _impl_.team_id_ = value; } - inline void BuildShipMsg::_internal_set_team_id(::int64_t value) + inline void BuildShipMsg::set_team_id(int64_t value) { - ; - _impl_.team_id_ = value; + _internal_set_team_id(value); + // @@protoc_insertion_point(field_set:protobuf.BuildShipMsg.team_id) } #ifdef __GNUC__ #pragma GCC diagnostic pop #endif // __GNUC__ + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- + + // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) + } // namespace protobuf // @@protoc_insertion_point(global_scope) -#include 
"google/protobuf/port_undef.inc" - -#endif // GOOGLE_PROTOBUF_INCLUDED_Message2Server_2eproto_2epb_2eh +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_Message2Server_2eproto diff --git a/CAPI/cpp/proto/MessageType.pb.cc b/CAPI/cpp/proto/MessageType.pb.cc index 394995aa..a86cbfb1 100644 --- a/CAPI/cpp/proto/MessageType.pb.cc +++ b/CAPI/cpp/proto/MessageType.pb.cc @@ -4,30 +4,32 @@ #include "MessageType.pb.h" #include -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/extension_set.h" -#include "google/protobuf/wire_format_lite.h" -#include "google/protobuf/descriptor.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/reflection_ops.h" -#include "google/protobuf/wire_format.h" + +#include +#include +#include +#include +#include +#include +#include // @@protoc_insertion_point(includes) +#include -// Must be included last. -#include "google/protobuf/port_def.inc" PROTOBUF_PRAGMA_INIT_SEG + namespace _pb = ::PROTOBUF_NAMESPACE_ID; -namespace _pbi = ::PROTOBUF_NAMESPACE_ID::internal; +namespace _pbi = _pb::internal; + namespace protobuf { } // namespace protobuf static const ::_pb::EnumDescriptor* file_level_enum_descriptors_MessageType_2eproto[16]; -static constexpr const ::_pb::ServiceDescriptor** - file_level_service_descriptors_MessageType_2eproto = nullptr; -const ::uint32_t TableStruct_MessageType_2eproto::offsets[1] = {}; +static constexpr ::_pb::ServiceDescriptor const** file_level_service_descriptors_MessageType_2eproto = nullptr; +const uint32_t TableStruct_MessageType_2eproto::offsets[1] = {}; static constexpr ::_pbi::MigrationSchema* schemas = nullptr; static constexpr ::_pb::Message* const* file_default_instances = nullptr; -const char descriptor_table_protodef_MessageType_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + +const char descriptor_table_protodef_MessageType_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = 
"\n\021MessageType.proto\022\010protobuf*P\n\tGameSta" "te\022\023\n\017NULL_GAME_STATE\020\000\022\016\n\nGAME_START\020\001\022" "\020\n\014GAME_RUNNING\020\002\022\014\n\010GAME_END\020\003*\207\001\n\tPlac" @@ -71,8 +73,8 @@ const char descriptor_table_protodef_MessageType_2eproto[] PROTOBUF_SECTION_VARI "\n\tCOMMUNITY\020\002\022\010\n\004FORT\020\003*4\n\010NewsType\022\022\n\016N" "ULL_NEWS_TYPE\020\000\022\010\n\004TEXT\020\001\022\n\n\006BINARY\020\002*.\n" "\nPlayerTeam\022\r\n\tNULL_TEAM\020\000\022\007\n\003RED\020\001\022\010\n\004B" - "LUE\020\002b\006proto3"}; -static ::absl::once_flag descriptor_table_MessageType_2eproto_once; + "LUE\020\002b\006proto3"; +static ::_pbi::once_flag descriptor_table_MessageType_2eproto_once; const ::_pbi::DescriptorTable descriptor_table_MessageType_2eproto = { false, false, @@ -90,25 +92,13 @@ const ::_pbi::DescriptorTable descriptor_table_MessageType_2eproto = { file_level_enum_descriptors_MessageType_2eproto, file_level_service_descriptors_MessageType_2eproto, }; - -// This function exists to be marked as weak. -// It can significantly speed up compilation by breaking up LLVM's SCC -// in the .pb.cc translation units. Large translation units see a -// reduction of more than 35% of walltime for optimized builds. Without -// the weak attribute all the messages in the file, including all the -// vtables and everything they use become part of the same SCC through -// a cycle like: -// GetMetadata -> descriptor table -> default instances -> -// vtables -> GetMetadata -// By adding a weak function here we break the connection from the -// individual vtables back into the descriptor table. PROTOBUF_ATTRIBUTE_WEAK const ::_pbi::DescriptorTable* descriptor_table_MessageType_2eproto_getter() { return &descriptor_table_MessageType_2eproto; } + // Force running AddDescriptors() at dynamic initialization time. 
-PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 -static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_MessageType_2eproto(&descriptor_table_MessageType_2eproto); +PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_MessageType_2eproto(&descriptor_table_MessageType_2eproto); namespace protobuf { const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* GameState_descriptor() @@ -129,6 +119,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* PlaceType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -152,6 +143,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShapeType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -169,6 +161,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* PlayerType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -186,6 +179,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShipType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -204,6 +198,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShipState_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -228,6 +223,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* WeaponType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -248,6 +244,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ConstructorType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -266,6 +263,7 @@ namespace 
protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ArmorType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -284,6 +282,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShieldType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -302,6 +301,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ProducerType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -320,6 +320,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ModuleType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -352,6 +353,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* BulletType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -372,6 +374,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ConstructionType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -390,6 +393,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* NewsType_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -407,6 +411,7 @@ namespace protobuf return false; } } + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* PlayerTeam_descriptor() { ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_MessageType_2eproto); @@ -424,9 +429,11 @@ namespace protobuf return false; } } + // @@protoc_insertion_point(namespace_scope) } // namespace protobuf PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_CLOSE + // @@protoc_insertion_point(global_scope) 
-#include "google/protobuf/port_undef.inc" +#include diff --git a/CAPI/cpp/proto/MessageType.pb.h b/CAPI/cpp/proto/MessageType.pb.h index cb6b79b3..8b4e82d4 100644 --- a/CAPI/cpp/proto/MessageType.pb.h +++ b/CAPI/cpp/proto/MessageType.pb.h @@ -1,42 +1,37 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: MessageType.proto -#ifndef GOOGLE_PROTOBUF_INCLUDED_MessageType_2eproto_2epb_2eh -#define GOOGLE_PROTOBUF_INCLUDED_MessageType_2eproto_2epb_2eh +#ifndef GOOGLE_PROTOBUF_INCLUDED_MessageType_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_MessageType_2eproto #include #include -#include - -#include "google/protobuf/port_def.inc" -#if PROTOBUF_VERSION < 4023000 -#error "This file was generated by a newer version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please update" -#error "your headers." -#endif // PROTOBUF_VERSION - -#if 4023004 < PROTOBUF_MIN_PROTOC_VERSION -#error "This file was generated by an older version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please" -#error "regenerate this file with a newer version of protoc." -#endif // PROTOBUF_MIN_PROTOC_VERSION -#include "google/protobuf/port_undef.inc" -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/arena.h" -#include "google/protobuf/arenastring.h" -#include "google/protobuf/generated_message_util.h" -#include "google/protobuf/metadata_lite.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/repeated_field.h" // IWYU pragma: export -#include "google/protobuf/extension_set.h" // IWYU pragma: export -#include "google/protobuf/generated_enum_reflection.h" -// @@protoc_insertion_point(includes) - -// Must be included last. -#include "google/protobuf/port_def.inc" +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. 
+#endif +#if 3021006 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export +#include +// @@protoc_insertion_point(includes) +#include #define PROTOBUF_INTERNAL_EXPORT_MessageType_2eproto - PROTOBUF_NAMESPACE_OPEN namespace internal { @@ -47,47 +42,40 @@ PROTOBUF_NAMESPACE_CLOSE // Internal implementation detail -- do not use these members. struct TableStruct_MessageType_2eproto { - static const ::uint32_t offsets[]; + static const uint32_t offsets[]; }; -extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable - descriptor_table_MessageType_2eproto; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_MessageType_2eproto; PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_CLOSE - namespace protobuf { + enum GameState : int { NULL_GAME_STATE = 0, GAME_START = 1, GAME_RUNNING = 2, GAME_END = 3, - GameState_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - GameState_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + GameState_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + GameState_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool GameState_IsValid(int value); - constexpr GameState GameState_MIN = static_cast(0); - constexpr GameState GameState_MAX = static_cast(3); - constexpr int GameState_ARRAYSIZE = 3 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - GameState_descriptor(); + constexpr GameState GameState_MIN = NULL_GAME_STATE; + constexpr GameState GameState_MAX = GAME_END; + constexpr int GameState_ARRAYSIZE = GameState_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* GameState_descriptor(); template - const std::string& 
GameState_Name(T value) - { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to GameState_Name()."); - return GameState_Name(static_cast(value)); - } - template<> - inline const std::string& GameState_Name(GameState value) + inline const std::string& GameState_Name(T enum_t_value) { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function GameState_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + GameState_descriptor(), enum_t_value ); } - inline bool GameState_Parse(absl::string_view name, GameState* value) + inline bool GameState_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, GameState* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( GameState_descriptor(), name, value @@ -104,32 +92,26 @@ namespace protobuf RESOURCE = 6, CONSTRUCTION = 7, WORMHOLE = 8, - PlaceType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - PlaceType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + PlaceType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + PlaceType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool PlaceType_IsValid(int value); - constexpr PlaceType PlaceType_MIN = static_cast(0); - constexpr PlaceType PlaceType_MAX = static_cast(8); - constexpr int PlaceType_ARRAYSIZE = 8 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - PlaceType_descriptor(); + constexpr PlaceType PlaceType_MIN = NULL_PLACE_TYPE; + constexpr PlaceType PlaceType_MAX = WORMHOLE; + constexpr int PlaceType_ARRAYSIZE = PlaceType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* PlaceType_descriptor(); template - const std::string& PlaceType_Name(T value) + inline const std::string& PlaceType_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to 
PlaceType_Name()."); - return PlaceType_Name(static_cast(value)); - } - template<> - inline const std::string& PlaceType_Name(PlaceType value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function PlaceType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + PlaceType_descriptor(), enum_t_value ); } - inline bool PlaceType_Parse(absl::string_view name, PlaceType* value) + inline bool PlaceType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, PlaceType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( PlaceType_descriptor(), name, value @@ -140,32 +122,26 @@ namespace protobuf NULL_SHAPE_TYPE = 0, CIRCLE = 1, SQUARE = 2, - ShapeType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ShapeType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ShapeType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ShapeType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ShapeType_IsValid(int value); - constexpr ShapeType ShapeType_MIN = static_cast(0); - constexpr ShapeType ShapeType_MAX = static_cast(2); - constexpr int ShapeType_ARRAYSIZE = 2 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ShapeType_descriptor(); + constexpr ShapeType ShapeType_MIN = NULL_SHAPE_TYPE; + constexpr ShapeType ShapeType_MAX = SQUARE; + constexpr int ShapeType_ARRAYSIZE = ShapeType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShapeType_descriptor(); template - const std::string& ShapeType_Name(T value) - { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ShapeType_Name()."); - return ShapeType_Name(static_cast(value)); - } - template<> - inline const std::string& ShapeType_Name(ShapeType value) + inline const std::string& ShapeType_Name(T enum_t_value) { - return 
::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ShapeType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ShapeType_descriptor(), enum_t_value ); } - inline bool ShapeType_Parse(absl::string_view name, ShapeType* value) + inline bool ShapeType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ShapeType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ShapeType_descriptor(), name, value @@ -176,32 +152,26 @@ namespace protobuf NULL_PLAYER_TYPE = 0, SHIP = 1, TEAM = 2, - PlayerType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - PlayerType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + PlayerType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + PlayerType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool PlayerType_IsValid(int value); - constexpr PlayerType PlayerType_MIN = static_cast(0); - constexpr PlayerType PlayerType_MAX = static_cast(2); - constexpr int PlayerType_ARRAYSIZE = 2 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - PlayerType_descriptor(); + constexpr PlayerType PlayerType_MIN = NULL_PLAYER_TYPE; + constexpr PlayerType PlayerType_MAX = TEAM; + constexpr int PlayerType_ARRAYSIZE = PlayerType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* PlayerType_descriptor(); template - const std::string& PlayerType_Name(T value) - { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to PlayerType_Name()."); - return PlayerType_Name(static_cast(value)); - } - template<> - inline const std::string& PlayerType_Name(PlayerType value) + inline const std::string& PlayerType_Name(T enum_t_value) { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type 
passed to function PlayerType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + PlayerType_descriptor(), enum_t_value ); } - inline bool PlayerType_Parse(absl::string_view name, PlayerType* value) + inline bool PlayerType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, PlayerType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( PlayerType_descriptor(), name, value @@ -213,32 +183,26 @@ namespace protobuf CIVILIAN_SHIP = 1, MILITARY_SHIP = 2, FLAG_SHIP = 3, - ShipType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ShipType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ShipType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ShipType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ShipType_IsValid(int value); - constexpr ShipType ShipType_MIN = static_cast(0); - constexpr ShipType ShipType_MAX = static_cast(3); - constexpr int ShipType_ARRAYSIZE = 3 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ShipType_descriptor(); + constexpr ShipType ShipType_MIN = NULL_SHIP_TYPE; + constexpr ShipType ShipType_MAX = FLAG_SHIP; + constexpr int ShipType_ARRAYSIZE = ShipType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShipType_descriptor(); template - const std::string& ShipType_Name(T value) + inline const std::string& ShipType_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ShipType_Name()."); - return ShipType_Name(static_cast(value)); - } - template<> - inline const std::string& ShipType_Name(ShipType value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ShipType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ShipType_descriptor(), enum_t_value ); } - inline bool ShipType_Parse(absl::string_view 
name, ShipType* value) + inline bool ShipType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ShipType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ShipType_descriptor(), name, value @@ -256,32 +220,26 @@ namespace protobuf SWINGING = 7, STUNNED = 8, MOVING = 9, - ShipState_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ShipState_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ShipState_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ShipState_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ShipState_IsValid(int value); - constexpr ShipState ShipState_MIN = static_cast(0); - constexpr ShipState ShipState_MAX = static_cast(9); - constexpr int ShipState_ARRAYSIZE = 9 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ShipState_descriptor(); + constexpr ShipState ShipState_MIN = NULL_STATUS; + constexpr ShipState ShipState_MAX = MOVING; + constexpr int ShipState_ARRAYSIZE = ShipState_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShipState_descriptor(); template - const std::string& ShipState_Name(T value) + inline const std::string& ShipState_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ShipState_Name()."); - return ShipState_Name(static_cast(value)); - } - template<> - inline const std::string& ShipState_Name(ShipState value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ShipState_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ShipState_descriptor(), enum_t_value ); } - inline bool ShipState_Parse(absl::string_view name, ShipState* value) + inline bool ShipState_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ShipState* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( 
ShipState_descriptor(), name, value @@ -295,32 +253,26 @@ namespace protobuf SHELLGUN = 3, MISSILEGUN = 4, ARCGUN = 5, - WeaponType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - WeaponType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + WeaponType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + WeaponType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool WeaponType_IsValid(int value); - constexpr WeaponType WeaponType_MIN = static_cast(0); - constexpr WeaponType WeaponType_MAX = static_cast(5); - constexpr int WeaponType_ARRAYSIZE = 5 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - WeaponType_descriptor(); + constexpr WeaponType WeaponType_MIN = NULL_WEAPON_TYPE; + constexpr WeaponType WeaponType_MAX = ARCGUN; + constexpr int WeaponType_ARRAYSIZE = WeaponType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* WeaponType_descriptor(); template - const std::string& WeaponType_Name(T value) + inline const std::string& WeaponType_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to WeaponType_Name()."); - return WeaponType_Name(static_cast(value)); - } - template<> - inline const std::string& WeaponType_Name(WeaponType value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function WeaponType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + WeaponType_descriptor(), enum_t_value ); } - inline bool WeaponType_Parse(absl::string_view name, WeaponType* value) + inline bool WeaponType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, WeaponType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( WeaponType_descriptor(), name, value @@ -332,32 +284,26 @@ namespace protobuf CONSTRUCTOR1 = 1, CONSTRUCTOR2 = 2, CONSTRUCTOR3 = 3, - 
ConstructorType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ConstructorType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ConstructorType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ConstructorType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ConstructorType_IsValid(int value); - constexpr ConstructorType ConstructorType_MIN = static_cast(0); - constexpr ConstructorType ConstructorType_MAX = static_cast(3); - constexpr int ConstructorType_ARRAYSIZE = 3 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ConstructorType_descriptor(); + constexpr ConstructorType ConstructorType_MIN = NULL_CONSTRUCTOR_TYPE; + constexpr ConstructorType ConstructorType_MAX = CONSTRUCTOR3; + constexpr int ConstructorType_ARRAYSIZE = ConstructorType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ConstructorType_descriptor(); template - const std::string& ConstructorType_Name(T value) + inline const std::string& ConstructorType_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ConstructorType_Name()."); - return ConstructorType_Name(static_cast(value)); - } - template<> - inline const std::string& ConstructorType_Name(ConstructorType value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ConstructorType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ConstructorType_descriptor(), enum_t_value ); } - inline bool ConstructorType_Parse(absl::string_view name, ConstructorType* value) + inline bool ConstructorType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ConstructorType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ConstructorType_descriptor(), name, value @@ -369,32 +315,26 @@ namespace protobuf ARMOR1 = 1, ARMOR2 = 2, 
ARMOR3 = 3, - ArmorType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ArmorType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ArmorType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ArmorType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ArmorType_IsValid(int value); - constexpr ArmorType ArmorType_MIN = static_cast(0); - constexpr ArmorType ArmorType_MAX = static_cast(3); - constexpr int ArmorType_ARRAYSIZE = 3 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ArmorType_descriptor(); + constexpr ArmorType ArmorType_MIN = NULL_ARMOR_TYPE; + constexpr ArmorType ArmorType_MAX = ARMOR3; + constexpr int ArmorType_ARRAYSIZE = ArmorType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ArmorType_descriptor(); template - const std::string& ArmorType_Name(T value) - { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ArmorType_Name()."); - return ArmorType_Name(static_cast(value)); - } - template<> - inline const std::string& ArmorType_Name(ArmorType value) + inline const std::string& ArmorType_Name(T enum_t_value) { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ArmorType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ArmorType_descriptor(), enum_t_value ); } - inline bool ArmorType_Parse(absl::string_view name, ArmorType* value) + inline bool ArmorType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ArmorType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ArmorType_descriptor(), name, value @@ -406,32 +346,26 @@ namespace protobuf SHIELD1 = 1, SHIELD2 = 2, SHIELD3 = 3, - ShieldType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ShieldType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + 
ShieldType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ShieldType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ShieldType_IsValid(int value); - constexpr ShieldType ShieldType_MIN = static_cast(0); - constexpr ShieldType ShieldType_MAX = static_cast(3); - constexpr int ShieldType_ARRAYSIZE = 3 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ShieldType_descriptor(); + constexpr ShieldType ShieldType_MIN = NULL_SHIELD_TYPE; + constexpr ShieldType ShieldType_MAX = SHIELD3; + constexpr int ShieldType_ARRAYSIZE = ShieldType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ShieldType_descriptor(); template - const std::string& ShieldType_Name(T value) - { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ShieldType_Name()."); - return ShieldType_Name(static_cast(value)); - } - template<> - inline const std::string& ShieldType_Name(ShieldType value) + inline const std::string& ShieldType_Name(T enum_t_value) { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ShieldType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ShieldType_descriptor(), enum_t_value ); } - inline bool ShieldType_Parse(absl::string_view name, ShieldType* value) + inline bool ShieldType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ShieldType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ShieldType_descriptor(), name, value @@ -443,32 +377,26 @@ namespace protobuf PRODUCER1 = 1, PRODUCER2 = 2, PRODUCER3 = 3, - ProducerType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ProducerType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ProducerType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ProducerType_INT_MAX_SENTINEL_DO_NOT_USE_ = 
std::numeric_limits::max() }; - bool ProducerType_IsValid(int value); - constexpr ProducerType ProducerType_MIN = static_cast(0); - constexpr ProducerType ProducerType_MAX = static_cast(3); - constexpr int ProducerType_ARRAYSIZE = 3 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ProducerType_descriptor(); + constexpr ProducerType ProducerType_MIN = NULL_PRODUCER_TYPE; + constexpr ProducerType ProducerType_MAX = PRODUCER3; + constexpr int ProducerType_ARRAYSIZE = ProducerType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ProducerType_descriptor(); template - const std::string& ProducerType_Name(T value) + inline const std::string& ProducerType_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ProducerType_Name()."); - return ProducerType_Name(static_cast(value)); - } - template<> - inline const std::string& ProducerType_Name(ProducerType value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ProducerType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ProducerType_descriptor(), enum_t_value ); } - inline bool ProducerType_Parse(absl::string_view name, ProducerType* value) + inline bool ProducerType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ProducerType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ProducerType_descriptor(), name, value @@ -494,32 +422,26 @@ namespace protobuf MODULE_SHELLGUN = 15, MODULE_MISSILEGUN = 16, MODULE_ARCGUN = 17, - ModuleType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ModuleType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ModuleType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ModuleType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ModuleType_IsValid(int 
value); - constexpr ModuleType ModuleType_MIN = static_cast(0); - constexpr ModuleType ModuleType_MAX = static_cast(17); - constexpr int ModuleType_ARRAYSIZE = 17 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - ModuleType_descriptor(); + constexpr ModuleType ModuleType_MIN = NULL_MODULE_TYPE; + constexpr ModuleType ModuleType_MAX = MODULE_ARCGUN; + constexpr int ModuleType_ARRAYSIZE = ModuleType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ModuleType_descriptor(); template - const std::string& ModuleType_Name(T value) - { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ModuleType_Name()."); - return ModuleType_Name(static_cast(value)); - } - template<> - inline const std::string& ModuleType_Name(ModuleType value) + inline const std::string& ModuleType_Name(T enum_t_value) { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ModuleType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ModuleType_descriptor(), enum_t_value ); } - inline bool ModuleType_Parse(absl::string_view name, ModuleType* value) + inline bool ModuleType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ModuleType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ModuleType_descriptor(), name, value @@ -533,32 +455,26 @@ namespace protobuf SHELL = 3, MISSILE = 4, ARC = 5, - BulletType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - BulletType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + BulletType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + BulletType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool BulletType_IsValid(int value); - constexpr BulletType BulletType_MIN = static_cast(0); - constexpr BulletType BulletType_MAX = static_cast(5); - constexpr int 
BulletType_ARRAYSIZE = 5 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - BulletType_descriptor(); + constexpr BulletType BulletType_MIN = NULL_BULLET_TYPE; + constexpr BulletType BulletType_MAX = ARC; + constexpr int BulletType_ARRAYSIZE = BulletType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* BulletType_descriptor(); template - const std::string& BulletType_Name(T value) - { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to BulletType_Name()."); - return BulletType_Name(static_cast(value)); - } - template<> - inline const std::string& BulletType_Name(BulletType value) + inline const std::string& BulletType_Name(T enum_t_value) { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function BulletType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + BulletType_descriptor(), enum_t_value ); } - inline bool BulletType_Parse(absl::string_view name, BulletType* value) + inline bool BulletType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, BulletType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( BulletType_descriptor(), name, value @@ -570,32 +486,26 @@ namespace protobuf FACTORY = 1, COMMUNITY = 2, FORT = 3, - ConstructionType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - ConstructionType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + ConstructionType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + ConstructionType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool ConstructionType_IsValid(int value); - constexpr ConstructionType ConstructionType_MIN = static_cast(0); - constexpr ConstructionType ConstructionType_MAX = static_cast(3); - constexpr int ConstructionType_ARRAYSIZE = 3 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - 
ConstructionType_descriptor(); + constexpr ConstructionType ConstructionType_MIN = NULL_CONSTRUCTION_TYPE; + constexpr ConstructionType ConstructionType_MAX = FORT; + constexpr int ConstructionType_ARRAYSIZE = ConstructionType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ConstructionType_descriptor(); template - const std::string& ConstructionType_Name(T value) + inline const std::string& ConstructionType_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to ConstructionType_Name()."); - return ConstructionType_Name(static_cast(value)); - } - template<> - inline const std::string& ConstructionType_Name(ConstructionType value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function ConstructionType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + ConstructionType_descriptor(), enum_t_value ); } - inline bool ConstructionType_Parse(absl::string_view name, ConstructionType* value) + inline bool ConstructionType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, ConstructionType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( ConstructionType_descriptor(), name, value @@ -606,32 +516,26 @@ namespace protobuf NULL_NEWS_TYPE = 0, TEXT = 1, BINARY = 2, - NewsType_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - NewsType_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + NewsType_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + NewsType_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool NewsType_IsValid(int value); - constexpr NewsType NewsType_MIN = static_cast(0); - constexpr NewsType NewsType_MAX = static_cast(2); - constexpr int NewsType_ARRAYSIZE = 2 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - NewsType_descriptor(); + constexpr 
NewsType NewsType_MIN = NULL_NEWS_TYPE; + constexpr NewsType NewsType_MAX = BINARY; + constexpr int NewsType_ARRAYSIZE = NewsType_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* NewsType_descriptor(); template - const std::string& NewsType_Name(T value) + inline const std::string& NewsType_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to NewsType_Name()."); - return NewsType_Name(static_cast(value)); - } - template<> - inline const std::string& NewsType_Name(NewsType value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function NewsType_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + NewsType_descriptor(), enum_t_value ); } - inline bool NewsType_Parse(absl::string_view name, NewsType* value) + inline bool NewsType_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, NewsType* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( NewsType_descriptor(), name, value @@ -642,38 +546,31 @@ namespace protobuf NULL_TEAM = 0, RED = 1, BLUE = 2, - PlayerTeam_INT_MIN_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::min(), - PlayerTeam_INT_MAX_SENTINEL_DO_NOT_USE_ = - std::numeric_limits<::int32_t>::max(), + PlayerTeam_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits::min(), + PlayerTeam_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits::max() }; - bool PlayerTeam_IsValid(int value); - constexpr PlayerTeam PlayerTeam_MIN = static_cast(0); - constexpr PlayerTeam PlayerTeam_MAX = static_cast(2); - constexpr int PlayerTeam_ARRAYSIZE = 2 + 1; - const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* - PlayerTeam_descriptor(); + constexpr PlayerTeam PlayerTeam_MIN = NULL_TEAM; + constexpr PlayerTeam PlayerTeam_MAX = BLUE; + constexpr int PlayerTeam_ARRAYSIZE = PlayerTeam_MAX + 1; + + const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* 
PlayerTeam_descriptor(); template - const std::string& PlayerTeam_Name(T value) + inline const std::string& PlayerTeam_Name(T enum_t_value) { - static_assert(std::is_same::value || std::is_integral::value, "Incorrect type passed to PlayerTeam_Name()."); - return PlayerTeam_Name(static_cast(value)); - } - template<> - inline const std::string& PlayerTeam_Name(PlayerTeam value) - { - return ::PROTOBUF_NAMESPACE_ID::internal::NameOfDenseEnum( - static_cast(value) + static_assert(::std::is_same::value || ::std::is_integral::value, "Incorrect type passed to function PlayerTeam_Name."); + return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum( + PlayerTeam_descriptor(), enum_t_value ); } - inline bool PlayerTeam_Parse(absl::string_view name, PlayerTeam* value) + inline bool PlayerTeam_Parse( + ::PROTOBUF_NAMESPACE_ID::ConstStringParam name, PlayerTeam* value + ) { return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum( PlayerTeam_descriptor(), name, value ); } - // =================================================================== // =================================================================== @@ -689,12 +586,13 @@ namespace protobuf #endif // __GNUC__ // @@protoc_insertion_point(namespace_scope) + } // namespace protobuf PROTOBUF_NAMESPACE_OPEN template<> -struct is_proto_enum<::protobuf::GameState> : std::true_type +struct is_proto_enum<::protobuf::GameState> : ::std::true_type { }; template<> @@ -703,7 +601,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::GameState>() return ::protobuf::GameState_descriptor(); } template<> -struct is_proto_enum<::protobuf::PlaceType> : std::true_type +struct is_proto_enum<::protobuf::PlaceType> : ::std::true_type { }; template<> @@ -712,7 +610,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::PlaceType>() return ::protobuf::PlaceType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ShapeType> : std::true_type +struct is_proto_enum<::protobuf::ShapeType> : ::std::true_type { }; 
template<> @@ -721,7 +619,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ShapeType>() return ::protobuf::ShapeType_descriptor(); } template<> -struct is_proto_enum<::protobuf::PlayerType> : std::true_type +struct is_proto_enum<::protobuf::PlayerType> : ::std::true_type { }; template<> @@ -730,7 +628,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::PlayerType>() return ::protobuf::PlayerType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ShipType> : std::true_type +struct is_proto_enum<::protobuf::ShipType> : ::std::true_type { }; template<> @@ -739,7 +637,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ShipType>() return ::protobuf::ShipType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ShipState> : std::true_type +struct is_proto_enum<::protobuf::ShipState> : ::std::true_type { }; template<> @@ -748,7 +646,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ShipState>() return ::protobuf::ShipState_descriptor(); } template<> -struct is_proto_enum<::protobuf::WeaponType> : std::true_type +struct is_proto_enum<::protobuf::WeaponType> : ::std::true_type { }; template<> @@ -757,7 +655,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::WeaponType>() return ::protobuf::WeaponType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ConstructorType> : std::true_type +struct is_proto_enum<::protobuf::ConstructorType> : ::std::true_type { }; template<> @@ -766,7 +664,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ConstructorType>() return ::protobuf::ConstructorType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ArmorType> : std::true_type +struct is_proto_enum<::protobuf::ArmorType> : ::std::true_type { }; template<> @@ -775,7 +673,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ArmorType>() return ::protobuf::ArmorType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ShieldType> : std::true_type +struct 
is_proto_enum<::protobuf::ShieldType> : ::std::true_type { }; template<> @@ -784,7 +682,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ShieldType>() return ::protobuf::ShieldType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ProducerType> : std::true_type +struct is_proto_enum<::protobuf::ProducerType> : ::std::true_type { }; template<> @@ -793,7 +691,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ProducerType>() return ::protobuf::ProducerType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ModuleType> : std::true_type +struct is_proto_enum<::protobuf::ModuleType> : ::std::true_type { }; template<> @@ -802,7 +700,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ModuleType>() return ::protobuf::ModuleType_descriptor(); } template<> -struct is_proto_enum<::protobuf::BulletType> : std::true_type +struct is_proto_enum<::protobuf::BulletType> : ::std::true_type { }; template<> @@ -811,7 +709,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::BulletType>() return ::protobuf::BulletType_descriptor(); } template<> -struct is_proto_enum<::protobuf::ConstructionType> : std::true_type +struct is_proto_enum<::protobuf::ConstructionType> : ::std::true_type { }; template<> @@ -820,7 +718,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::ConstructionType>() return ::protobuf::ConstructionType_descriptor(); } template<> -struct is_proto_enum<::protobuf::NewsType> : std::true_type +struct is_proto_enum<::protobuf::NewsType> : ::std::true_type { }; template<> @@ -829,7 +727,7 @@ inline const EnumDescriptor* GetEnumDescriptor<::protobuf::NewsType>() return ::protobuf::NewsType_descriptor(); } template<> -struct is_proto_enum<::protobuf::PlayerTeam> : std::true_type +struct is_proto_enum<::protobuf::PlayerTeam> : ::std::true_type { }; template<> @@ -842,6 +740,5 @@ PROTOBUF_NAMESPACE_CLOSE // @@protoc_insertion_point(global_scope) -#include "google/protobuf/port_undef.inc" - 
-#endif // GOOGLE_PROTOBUF_INCLUDED_MessageType_2eproto_2epb_2eh +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_MessageType_2eproto diff --git a/CAPI/cpp/proto/Services.grpc.pb.cc b/CAPI/cpp/proto/Services.grpc.pb.cc index 07e7bdcf..c1cbfd8b 100644 --- a/CAPI/cpp/proto/Services.grpc.pb.cc +++ b/CAPI/cpp/proto/Services.grpc.pb.cc @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/CAPI/cpp/proto/Services.grpc.pb.h b/CAPI/cpp/proto/Services.grpc.pb.h index a80bcd72..3fdff8d0 100644 --- a/CAPI/cpp/proto/Services.grpc.pb.h +++ b/CAPI/cpp/proto/Services.grpc.pb.h @@ -15,13 +15,13 @@ #include #include #include -#include +#include #include #include -#include +#include #include #include -#include +#include #include #include diff --git a/CAPI/cpp/proto/Services.pb.cc b/CAPI/cpp/proto/Services.pb.cc index 9bb67597..54eabe0f 100644 --- a/CAPI/cpp/proto/Services.pb.cc +++ b/CAPI/cpp/proto/Services.pb.cc @@ -4,31 +4,32 @@ #include "Services.pb.h" #include -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/extension_set.h" -#include "google/protobuf/wire_format_lite.h" -#include "google/protobuf/descriptor.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/reflection_ops.h" -#include "google/protobuf/wire_format.h" + +#include +#include +#include +#include +#include +#include +#include // @@protoc_insertion_point(includes) +#include -// Must be included last. 
-#include "google/protobuf/port_def.inc" PROTOBUF_PRAGMA_INIT_SEG + namespace _pb = ::PROTOBUF_NAMESPACE_ID; -namespace _pbi = ::PROTOBUF_NAMESPACE_ID::internal; +namespace _pbi = _pb::internal; + namespace protobuf { } // namespace protobuf -static constexpr const ::_pb::EnumDescriptor** - file_level_enum_descriptors_Services_2eproto = nullptr; -static constexpr const ::_pb::ServiceDescriptor** - file_level_service_descriptors_Services_2eproto = nullptr; -const ::uint32_t TableStruct_Services_2eproto::offsets[1] = {}; +static constexpr ::_pb::EnumDescriptor const** file_level_enum_descriptors_Services_2eproto = nullptr; +static constexpr ::_pb::ServiceDescriptor const** file_level_service_descriptors_Services_2eproto = nullptr; +const uint32_t TableStruct_Services_2eproto::offsets[1] = {}; static constexpr ::_pbi::MigrationSchema* schemas = nullptr; static constexpr ::_pb::Message* const* file_default_instances = nullptr; -const char descriptor_table_protodef_Services_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { + +const char descriptor_table_protodef_Services_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\016Services.proto\022\010protobuf\032\025Message2Clie" "nts.proto\032\024Message2Server.proto2\363\005\n\020Avai" "lableService\0223\n\rTryConnection\022\017.protobuf" @@ -49,13 +50,12 @@ const char descriptor_table_protodef_Services_2eproto[] PROTOBUF_SECTION_VARIABL "cycle\022\017.protobuf.IDMsg\032\021.protobuf.BoolRe" "s\0226\n\tBuildShip\022\026.protobuf.BuildShipMsg\032\021" ".protobuf.BoolRes\0222\n\014EndAllAction\022\017.prot" - "obuf.IDMsg\032\021.protobuf.BoolResb\006proto3"}; -static const ::_pbi::DescriptorTable* const descriptor_table_Services_2eproto_deps[2] = - { - &::descriptor_table_Message2Clients_2eproto, - &::descriptor_table_Message2Server_2eproto, + "obuf.IDMsg\032\021.protobuf.BoolResb\006proto3"; +static const ::_pbi::DescriptorTable* const descriptor_table_Services_2eproto_deps[2] = { + 
&::descriptor_table_Message2Clients_2eproto, + &::descriptor_table_Message2Server_2eproto, }; -static ::absl::once_flag descriptor_table_Services_2eproto_once; +static ::_pbi::once_flag descriptor_table_Services_2eproto_once; const ::_pbi::DescriptorTable descriptor_table_Services_2eproto = { false, false, @@ -73,30 +73,20 @@ const ::_pbi::DescriptorTable descriptor_table_Services_2eproto = { file_level_enum_descriptors_Services_2eproto, file_level_service_descriptors_Services_2eproto, }; - -// This function exists to be marked as weak. -// It can significantly speed up compilation by breaking up LLVM's SCC -// in the .pb.cc translation units. Large translation units see a -// reduction of more than 35% of walltime for optimized builds. Without -// the weak attribute all the messages in the file, including all the -// vtables and everything they use become part of the same SCC through -// a cycle like: -// GetMetadata -> descriptor table -> default instances -> -// vtables -> GetMetadata -// By adding a weak function here we break the connection from the -// individual vtables back into the descriptor table. PROTOBUF_ATTRIBUTE_WEAK const ::_pbi::DescriptorTable* descriptor_table_Services_2eproto_getter() { return &descriptor_table_Services_2eproto; } + // Force running AddDescriptors() at dynamic initialization time. 
-PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 -static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_Services_2eproto(&descriptor_table_Services_2eproto); +PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_Services_2eproto(&descriptor_table_Services_2eproto); namespace protobuf { + // @@protoc_insertion_point(namespace_scope) } // namespace protobuf PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_CLOSE + // @@protoc_insertion_point(global_scope) -#include "google/protobuf/port_undef.inc" +#include diff --git a/CAPI/cpp/proto/Services.pb.h b/CAPI/cpp/proto/Services.pb.h index c58807eb..2f192c4d 100644 --- a/CAPI/cpp/proto/Services.pb.h +++ b/CAPI/cpp/proto/Services.pb.h @@ -1,43 +1,38 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: Services.proto -#ifndef GOOGLE_PROTOBUF_INCLUDED_Services_2eproto_2epb_2eh -#define GOOGLE_PROTOBUF_INCLUDED_Services_2eproto_2epb_2eh +#ifndef GOOGLE_PROTOBUF_INCLUDED_Services_2eproto +#define GOOGLE_PROTOBUF_INCLUDED_Services_2eproto #include #include -#include -#include "google/protobuf/port_def.inc" -#if PROTOBUF_VERSION < 4023000 -#error "This file was generated by a newer version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please update" -#error "your headers." -#endif // PROTOBUF_VERSION - -#if 4023004 < PROTOBUF_MIN_PROTOC_VERSION -#error "This file was generated by an older version of protoc which is" -#error "incompatible with your Protocol Buffer headers. Please" -#error "regenerate this file with a newer version of protoc." 
-#endif // PROTOBUF_MIN_PROTOC_VERSION -#include "google/protobuf/port_undef.inc" -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/arena.h" -#include "google/protobuf/arenastring.h" -#include "google/protobuf/generated_message_util.h" -#include "google/protobuf/metadata_lite.h" -#include "google/protobuf/generated_message_reflection.h" -#include "google/protobuf/repeated_field.h" // IWYU pragma: export -#include "google/protobuf/extension_set.h" // IWYU pragma: export +#include +#if PROTOBUF_VERSION < 3021000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 3021006 < PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: export +#include // IWYU pragma: export #include "Message2Clients.pb.h" #include "Message2Server.pb.h" // @@protoc_insertion_point(includes) - -// Must be included last. -#include "google/protobuf/port_def.inc" - +#include #define PROTOBUF_INTERNAL_EXPORT_Services_2eproto - PROTOBUF_NAMESPACE_OPEN namespace internal { @@ -48,13 +43,11 @@ PROTOBUF_NAMESPACE_CLOSE // Internal implementation detail -- do not use these members. 
struct TableStruct_Services_2eproto { - static const ::uint32_t offsets[]; + static const uint32_t offsets[]; }; -extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable - descriptor_table_Services_2eproto; +extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_Services_2eproto; PROTOBUF_NAMESPACE_OPEN PROTOBUF_NAMESPACE_CLOSE - namespace protobuf { @@ -73,10 +66,10 @@ namespace protobuf #endif // __GNUC__ // @@protoc_insertion_point(namespace_scope) + } // namespace protobuf // @@protoc_insertion_point(global_scope) -#include "google/protobuf/port_undef.inc" - -#endif // GOOGLE_PROTOBUF_INCLUDED_Services_2eproto_2epb_2eh +#include +#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_Services_2eproto